xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/tree-eh.c (revision 8feb0f0b7eaff0608f8350bbfa3098827b4bb91b)
1 /* Exception handling semantics and decomposition for trees.
2    Copyright (C) 2003-2020 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
10 
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 GNU General Public License for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "rtl.h"
25 #include "tree.h"
26 #include "gimple.h"
27 #include "cfghooks.h"
28 #include "tree-pass.h"
29 #include "ssa.h"
30 #include "cgraph.h"
31 #include "diagnostic-core.h"
32 #include "fold-const.h"
33 #include "calls.h"
34 #include "except.h"
35 #include "cfganal.h"
36 #include "cfgcleanup.h"
37 #include "tree-eh.h"
38 #include "gimple-iterator.h"
39 #include "tree-cfg.h"
40 #include "tree-into-ssa.h"
41 #include "tree-ssa.h"
42 #include "tree-inline.h"
43 #include "langhooks.h"
44 #include "cfgloop.h"
45 #include "gimple-low.h"
46 #include "stringpool.h"
47 #include "attribs.h"
48 #include "asan.h"
49 #include "gimplify.h"
50 
51 /* In some instances a tree and a gimple statement need to be stored in
52    the same table, e.g. a hash table.  This union makes that possible. */
53 typedef union {tree *tp; tree t; gimple *g;} treemple;
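
/* A minimal illustrative sketch (not part of the original file): the
   same treemple key can carry either member, so one hash table serves
   both kinds of lookup.  The variables below are hypothetical
   placeholders.  */
#if 0
treemple key;
key.t = some_label_decl;	/* store a tree, e.g. a goto's LABEL_DECL */
key.g = some_try_stmt;		/* or store a gimple, e.g. a GIMPLE_TRY */
#endif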
54 
55 /* Misc functions used in this file.  */
56 
57 /* Remember and lookup EH landing pad data for arbitrary statements.
58    Really this means any statement that could_throw_p.  We could
59    stuff this information into the stmt_ann data structure, but:
60 
61    (1) We absolutely rely on this information being kept until
62    we get to rtl.  Once we're done with lowering here, if we lose
63    the information there's no way to recover it!
64 
65    (2) There are many more statements that *cannot* throw as
66    compared to those that can.  We should be saving some amount
67    of space by only allocating memory for those that can throw.  */
68 
69 /* Add statement T in function IFUN to landing pad NUM.  */
70 
71 static void
72 add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
73 {
74   gcc_assert (num != 0);
75 
76   if (!get_eh_throw_stmt_table (ifun))
77     set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));
78 
79   gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
80 }
81 
82 /* Add statement T in the current function (cfun) to EH landing pad NUM.  */
83 
84 void
85 add_stmt_to_eh_lp (gimple *t, int num)
86 {
87   add_stmt_to_eh_lp_fn (cfun, t, num);
88 }
89 
90 /* Add statement T to the single EH landing pad in REGION.  */
91 
92 static void
93 record_stmt_eh_region (eh_region region, gimple *t)
94 {
95   if (region == NULL)
96     return;
97   if (region->type == ERT_MUST_NOT_THROW)
98     add_stmt_to_eh_lp_fn (cfun, t, -region->index);
99   else
100     {
101       eh_landing_pad lp = region->landing_pads;
102       if (lp == NULL)
103 	lp = gen_eh_landing_pad (region);
104       else
105 	gcc_assert (lp->next_lp == NULL);
106       add_stmt_to_eh_lp_fn (cfun, t, lp->index);
107     }
108 }
109 
110 
111 /* Remove statement T in function IFUN from its EH landing pad.  */
112 
113 bool
114 remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
115 {
116   if (!get_eh_throw_stmt_table (ifun))
117     return false;
118 
119   if (!get_eh_throw_stmt_table (ifun)->get (t))
120     return false;
121 
122   get_eh_throw_stmt_table (ifun)->remove (t);
123   return true;
124 }
125 
126 
127 /* Remove statement T in the current function (cfun) from its
128    EH landing pad.  */
129 
130 bool
131 remove_stmt_from_eh_lp (gimple *t)
132 {
133   return remove_stmt_from_eh_lp_fn (cfun, t);
134 }
135 
136 /* Determine if statement T is inside an EH region in function IFUN.
137    Positive numbers indicate a landing pad index; negative numbers
138    indicate a MUST_NOT_THROW region index; zero indicates that the
139    statement is not recorded in the region table.  */
140 
141 int
142 lookup_stmt_eh_lp_fn (struct function *ifun, const gimple *t)
143 {
144   if (ifun->eh->throw_stmt_table == NULL)
145     return 0;
146 
147   int *lp_nr = ifun->eh->throw_stmt_table->get (const_cast <gimple *> (t));
148   return lp_nr ? *lp_nr : 0;
149 }
150 
151 /* Likewise, but always use the current function.  */
152 
153 int
154 lookup_stmt_eh_lp (const gimple *t)
155 {
156   /* We can get called from initialized data when -fnon-call-exceptions
157      is on; prevent crash.  */
158   if (!cfun)
159     return 0;
160   return lookup_stmt_eh_lp_fn (cfun, t);
161 }
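
/* A short usage sketch of the API above; illustrative only, with
   hypothetical placeholders CALL_STMT and LP_NR.  */
#if 0
add_stmt_to_eh_lp (call_stmt, lp_nr);	/* record the association */
gcc_assert (lookup_stmt_eh_lp (call_stmt) == lp_nr);
remove_stmt_from_eh_lp (call_stmt);	/* drop it, e.g. when the stmt
					   is deleted */
#endif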
162 
163 /* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
164    nodes and LABEL_DECL nodes.  We will use this during the second phase to
165    determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */
166 
167 struct finally_tree_node
168 {
169   /* When storing a GIMPLE_TRY, we have to record a gimple.  However
170      when deciding whether a GOTO to a certain LABEL_DECL (which is a
171      tree) leaves the TRY block, it's necessary to record a tree in
172      this field.  Thus a treemple is used. */
173   treemple child;
174   gtry *parent;
175 };
176 
177 /* Hashtable helpers.  */
178 
179 struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
180 {
181   static inline hashval_t hash (const finally_tree_node *);
182   static inline bool equal (const finally_tree_node *,
183 			    const finally_tree_node *);
184 };
185 
186 inline hashval_t
187 finally_tree_hasher::hash (const finally_tree_node *v)
188 {
189   return (intptr_t)v->child.t >> 4;
190 }
191 
192 inline bool
193 finally_tree_hasher::equal (const finally_tree_node *v,
194 			    const finally_tree_node *c)
195 {
196   return v->child.t == c->child.t;
197 }
198 
199 /* Note that this table is *not* marked GTY.  It is short-lived.  */
200 static hash_table<finally_tree_hasher> *finally_tree;
201 
202 static void
203 record_in_finally_tree (treemple child, gtry *parent)
204 {
205   struct finally_tree_node *n;
206   finally_tree_node **slot;
207 
208   n = XNEW (struct finally_tree_node);
209   n->child = child;
210   n->parent = parent;
211 
212   slot = finally_tree->find_slot (n, INSERT);
213   gcc_assert (!*slot);
214   *slot = n;
215 }
216 
217 static void
218 collect_finally_tree (gimple *stmt, gtry *region);
219 
220 /* Go through the gimple sequence.  Works with collect_finally_tree to
221    record all GIMPLE_LABEL and GIMPLE_TRY statements. */
222 
223 static void
224 collect_finally_tree_1 (gimple_seq seq, gtry *region)
225 {
226   gimple_stmt_iterator gsi;
227 
228   for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
229     collect_finally_tree (gsi_stmt (gsi), region);
230 }
231 
232 static void
233 collect_finally_tree (gimple *stmt, gtry *region)
234 {
235   treemple temp;
236 
237   switch (gimple_code (stmt))
238     {
239     case GIMPLE_LABEL:
240       temp.t = gimple_label_label (as_a <glabel *> (stmt));
241       record_in_finally_tree (temp, region);
242       break;
243 
244     case GIMPLE_TRY:
245       if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
246         {
247           temp.g = stmt;
248           record_in_finally_tree (temp, region);
249           collect_finally_tree_1 (gimple_try_eval (stmt),
250 				  as_a <gtry *> (stmt));
251 	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
252         }
253       else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
254         {
255           collect_finally_tree_1 (gimple_try_eval (stmt), region);
256           collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
257         }
258       break;
259 
260     case GIMPLE_CATCH:
261       collect_finally_tree_1 (gimple_catch_handler (
262 				 as_a <gcatch *> (stmt)),
263 			      region);
264       break;
265 
266     case GIMPLE_EH_FILTER:
267       collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
268       break;
269 
270     case GIMPLE_EH_ELSE:
271       {
272 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
273 	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
274 	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
275       }
276       break;
277 
278     default:
279       /* A type, a decl, or some kind of statement that we're not
280 	 interested in.  Don't walk them.  */
281       break;
282     }
283 }
284 
285 
286 /* Use the finally tree to determine if a jump from START to TARGET
287    would leave the try_finally node that START lives in.  */
288 
289 static bool
290 outside_finally_tree (treemple start, gimple *target)
291 {
292   struct finally_tree_node n, *p;
293 
294   do
295     {
296       n.child = start;
297       p = finally_tree->find (&n);
298       if (!p)
299 	return true;
300       start.g = p->parent;
301     }
302   while (start.g != target);
303 
304   return false;
305 }
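
/* A worked example of the two functions above (illustrative only).
   Given

	try {
	  goto out;
	} finally {
	  f ();
	}
      out:;

   the label "out" is recorded with whatever try_finally encloses it
   (here, none), so walking parents from "out" never reaches this
   GIMPLE_TRY and outside_finally_tree returns true: the goto escapes
   the finally block and must be redirected through it.  */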
306 
307 /* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
308    nodes into a set of gotos, magic labels, and eh regions.
309    The eh region creation is straightforward, but frobbing all the gotos
310    and such into shape isn't.  */
311 
312 /* The sequence into which we record all EH stuff.  This will be
313    placed at the end of the function when we're all done.  */
314 static gimple_seq eh_seq;
315 
316 /* Record whether an EH region contains something that can throw,
317    indexed by EH region number.  */
318 static bitmap eh_region_may_contain_throw_map;
319 
320 /* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
321    statements that are seen to escape this GIMPLE_TRY_FINALLY node.
322    The idea is to record a gimple statement for everything except for
323    the conditionals, which get their labels recorded. Since labels are
324    of type 'tree', we need this node to store both gimple and tree
325    objects.  REPL_STMT is the sequence used to replace the goto/return
326    statement.  CONT_STMT is used to store the statement that allows
327    the return/goto to jump to the original destination. */
328 
329 struct goto_queue_node
330 {
331   treemple stmt;
332   location_t location;
333   gimple_seq repl_stmt;
334   gimple *cont_stmt;
335   int index;
336   /* This is used when index >= 0 to indicate that stmt is a label (as
337      opposed to a goto stmt).  */
338   int is_label;
339 };
340 
341 /* State of the world while lowering.  */
342 
343 struct leh_state
344 {
345   /* What's "current" while constructing the eh region tree.  These
346      correspond to variables of the same name in cfun->eh, which we
347      don't have easy access to.  */
348   eh_region cur_region;
349 
350   /* What's "current" for the purposes of __builtin_eh_pointer.  For
351      a CATCH, this is the associated TRY.  For an EH_FILTER, this is
352      the associated ALLOWED_EXCEPTIONS, etc.  */
353   eh_region ehp_region;
354 
355   /* Processing of TRY_FINALLY requires a bit more state.  This is
356      split out into a separate structure so that we don't have to
357      copy so much when processing other nodes.  */
358   struct leh_tf_state *tf;
359 
360   /* Outer non-cleanup region.  */
361   eh_region outer_non_cleanup;
362 };
363 
364 struct leh_tf_state
365 {
366   /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
367      try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
368      this so that outside_finally_tree can reliably reference the tree used
369      in the collect_finally_tree data structures.  */
370   gtry *try_finally_expr;
371   gtry *top_p;
372 
373   /* While lowering, a top_p is usually expanded into multiple statements,
374      so we need the following field to store them. */
375   gimple_seq top_p_seq;
376 
377   /* The state outside this try_finally node.  */
378   struct leh_state *outer;
379 
380   /* The exception region created for it.  */
381   eh_region region;
382 
383   /* The goto queue.  */
384   struct goto_queue_node *goto_queue;
385   size_t goto_queue_size;
386   size_t goto_queue_active;
387 
388   /* Pointer map to help in searching goto_queue when it is large.  */
389   hash_map<gimple *, goto_queue_node *> *goto_queue_map;
390 
391   /* The set of unique labels seen as entries in the goto queue.  */
392   vec<tree> dest_array;
393 
394   /* A label to be added at the end of the completed transformed
395      sequence.  It will be set if may_fallthru was true *at one time*,
396      though subsequent transformations may have cleared that flag.  */
397   tree fallthru_label;
398 
399   /* True if it is possible to fall out the bottom of the try block.
400      Cleared if the fallthru is converted to a goto.  */
401   bool may_fallthru;
402 
403   /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
404   bool may_return;
405 
406   /* True if the finally block can receive an exception edge.
407      Cleared if the exception case is handled by code duplication.  */
408   bool may_throw;
409 };
410 
411 static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);
412 
413 /* Search for STMT in the goto queue.  Return the replacement,
414    or null if the statement isn't in the queue.  */
415 
416 #define LARGE_GOTO_QUEUE 20
417 
418 static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);
419 
420 static gimple_seq
421 find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
422 {
423   unsigned int i;
424 
425   if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
426     {
427       for (i = 0; i < tf->goto_queue_active; i++)
428 	if (tf->goto_queue[i].stmt.g == stmt.g)
429 	  return tf->goto_queue[i].repl_stmt;
430       return NULL;
431     }
432 
433   /* If we have a large number of entries in the goto_queue, create a
434      pointer map and use that for searching.  */
435 
436   if (!tf->goto_queue_map)
437     {
438       tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
439       for (i = 0; i < tf->goto_queue_active; i++)
440 	{
441 	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
442 						  &tf->goto_queue[i]);
443 	  gcc_assert (!existed);
444 	}
445     }
446 
447   goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
448   if (slot != NULL)
449     return ((*slot)->repl_stmt);
450 
451   return NULL;
452 }
453 
454 /* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
455    lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
456    then we can just splat it in, otherwise we add the new stmts immediately
457    after the GIMPLE_COND and redirect.  */
458 
459 static void
460 replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
461 				gimple_stmt_iterator *gsi)
462 {
463   tree label;
464   gimple_seq new_seq;
465   treemple temp;
466   location_t loc = gimple_location (gsi_stmt (*gsi));
467 
468   temp.tp = tp;
469   new_seq = find_goto_replacement (tf, temp);
470   if (!new_seq)
471     return;
472 
473   if (gimple_seq_singleton_p (new_seq)
474       && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
475     {
476       *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
477       return;
478     }
479 
480   label = create_artificial_label (loc);
481   /* Set the new label for the GIMPLE_COND.  */
482   *tp = label;
483 
484   gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
485   gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
486 }
487 
488 /* The real work of replace_goto_queue.  Returns with GSI updated to
489    point to the next statement.  */
490 
491 static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);
492 
493 static void
494 replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
495 		      gimple_stmt_iterator *gsi)
496 {
497   gimple_seq seq;
498   treemple temp;
499   temp.g = NULL;
500 
501   switch (gimple_code (stmt))
502     {
503     case GIMPLE_GOTO:
504     case GIMPLE_RETURN:
505       temp.g = stmt;
506       seq = find_goto_replacement (tf, temp);
507       if (seq)
508 	{
509 	  gimple_stmt_iterator i;
510 	  seq = gimple_seq_copy (seq);
511 	  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
512 	    gimple_set_location (gsi_stmt (i), gimple_location (stmt));
513 	  gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
514 	  gsi_remove (gsi, false);
515 	  return;
516 	}
517       break;
518 
519     case GIMPLE_COND:
520       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
521       replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
522       break;
523 
524     case GIMPLE_TRY:
525       replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
526       replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
527       break;
528     case GIMPLE_CATCH:
529       replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
530 				      as_a <gcatch *> (stmt)),
531 				    tf);
532       break;
533     case GIMPLE_EH_FILTER:
534       replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
535       break;
536     case GIMPLE_EH_ELSE:
537       {
538 	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
539 	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
540 				      tf);
541 	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
542 				      tf);
543       }
544       break;
545 
546     default:
547       /* These won't have gotos in them.  */
548       break;
549     }
550 
551   gsi_next (gsi);
552 }
553 
554 /* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */
555 
556 static void
557 replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
558 {
559   gimple_stmt_iterator gsi = gsi_start (*seq);
560 
561   while (!gsi_end_p (gsi))
562     replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
563 }
564 
565 /* Replace all goto queue members.  */
566 
567 static void
568 replace_goto_queue (struct leh_tf_state *tf)
569 {
570   if (tf->goto_queue_active == 0)
571     return;
572   replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
573   replace_goto_queue_stmt_list (&eh_seq, tf);
574 }
575 
576 /* Add a new record to the goto queue contained in TF. NEW_STMT is the
577    data to be added; IS_LABEL indicates whether NEW_STMT is a label or
578    a gimple return. */
579 
580 static void
581 record_in_goto_queue (struct leh_tf_state *tf,
582                       treemple new_stmt,
583                       int index,
584                       bool is_label,
585 		      location_t location)
586 {
587   size_t active, size;
588   struct goto_queue_node *q;
589 
590   gcc_assert (!tf->goto_queue_map);
591 
592   active = tf->goto_queue_active;
593   size = tf->goto_queue_size;
594   if (active >= size)
595     {
596       size = (size ? size * 2 : 32);
597       tf->goto_queue_size = size;
598       tf->goto_queue
599          = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
600     }
601 
602   q = &tf->goto_queue[active];
603   tf->goto_queue_active = active + 1;
604 
605   memset (q, 0, sizeof (*q));
606   q->stmt = new_stmt;
607   q->index = index;
608   q->location = location;
609   q->is_label = is_label;
610 }
611 
612 /* Record LABEL in the goto queue contained in TF.
613    TF must not be null.  */
614 
615 static void
616 record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
617 			    location_t location)
618 {
619   int index;
620   treemple temp, new_stmt;
621 
622   if (!label)
623     return;
624 
625   /* Computed and non-local gotos do not get processed.  Given
626      their nature we can neither tell whether we've escaped the
627      finally block nor redirect them even if we could.  */
628   if (TREE_CODE (label) != LABEL_DECL)
629     return;
630 
631   /* No need to record gotos that don't leave the try block.  */
632   temp.t = label;
633   if (!outside_finally_tree (temp, tf->try_finally_expr))
634     return;
635 
636   if (! tf->dest_array.exists ())
637     {
638       tf->dest_array.create (10);
639       tf->dest_array.quick_push (label);
640       index = 0;
641     }
642   else
643     {
644       int n = tf->dest_array.length ();
645       for (index = 0; index < n; ++index)
646         if (tf->dest_array[index] == label)
647           break;
648       if (index == n)
649         tf->dest_array.safe_push (label);
650     }
651 
652   /* In the case of a GOTO we want to record the destination label,
653      since with a GIMPLE_COND we have easy access to the then/else
654      labels. */
655   new_stmt = stmt;
656   record_in_goto_queue (tf, new_stmt, index, true, location);
657 }
658 
659 /* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
660    node, and if so record that fact in the goto queue associated with that
661    try_finally node.  */
662 
663 static void
664 maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
665 {
666   struct leh_tf_state *tf = state->tf;
667   treemple new_stmt;
668 
669   if (!tf)
670     return;
671 
672   switch (gimple_code (stmt))
673     {
674     case GIMPLE_COND:
675       {
676 	gcond *cond_stmt = as_a <gcond *> (stmt);
677 	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
678 	record_in_goto_queue_label (tf, new_stmt,
679 				    gimple_cond_true_label (cond_stmt),
680 				    EXPR_LOCATION (*new_stmt.tp));
681 	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
682 	record_in_goto_queue_label (tf, new_stmt,
683 				    gimple_cond_false_label (cond_stmt),
684 				    EXPR_LOCATION (*new_stmt.tp));
685       }
686       break;
687     case GIMPLE_GOTO:
688       new_stmt.g = stmt;
689       record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
690 				  gimple_location (stmt));
691       break;
692 
693     case GIMPLE_RETURN:
694       tf->may_return = true;
695       new_stmt.g = stmt;
696       record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
697       break;
698 
699     default:
700       gcc_unreachable ();
701     }
702 }
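
/* An illustrative note on what ends up in the queue.  For a body like

	try {
	  if (p) goto out;
	  return;
	} finally { ... }

   maybe_record_in_goto_queue adds one entry for the GIMPLE_COND's
   true-label slot (is_label true, index = position of "out" in
   dest_array) and one for the GIMPLE_RETURN (is_label false,
   index = -1).  */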
703 
704 
705 #if CHECKING_P
706 /* We do not process GIMPLE_SWITCHes for now.  As long as the original source
707    was in fact structured, and we've not yet done jump threading, then none
708    of the labels will leave outer GIMPLE_TRY_FINALLY nodes. Verify this.  */
709 
710 static void
711 verify_norecord_switch_expr (struct leh_state *state,
712 			     gswitch *switch_expr)
713 {
714   struct leh_tf_state *tf = state->tf;
715   size_t i, n;
716 
717   if (!tf)
718     return;
719 
720   n = gimple_switch_num_labels (switch_expr);
721 
722   for (i = 0; i < n; ++i)
723     {
724       treemple temp;
725       tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
726       temp.t = lab;
727       gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
728     }
729 }
730 #else
731 #define verify_norecord_switch_expr(state, switch_expr)
732 #endif
733 
734 /* Redirect a GIMPLE_RETURN pointed to by Q to FINLAB.  If MOD is
735    non-null, insert it before the new branch.  */
736 
737 static void
738 do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
739 {
740   gimple *x;
741 
742   /* In the case of a return, the queue node must be a gimple statement.  */
743   gcc_assert (!q->is_label);
744 
745   /* Note that the return value may have already been computed, e.g.,
746 
747 	int x;
748 	int foo (void)
749 	{
750 	  x = 0;
751 	  try {
752 	    return x;
753 	  } finally {
754 	    x++;
755 	  }
756 	}
757 
758      should return 0, not 1.  We don't have to do anything to make
759      this happen because the return value has been placed in the
760      RESULT_DECL already.  */
761 
762   q->cont_stmt = q->stmt.g;
763 
764   if (mod)
765     gimple_seq_add_seq (&q->repl_stmt, mod);
766 
767   x = gimple_build_goto (finlab);
768   gimple_set_location (x, q->location);
769   gimple_seq_add_stmt (&q->repl_stmt, x);
770 }
771 
772 /* Similar, but easier, for GIMPLE_GOTO.  */
773 
774 static void
775 do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
776 		     struct leh_tf_state *tf)
777 {
778   ggoto *x;
779 
780   gcc_assert (q->is_label);
781 
782   q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);
783 
784   if (mod)
785     gimple_seq_add_seq (&q->repl_stmt, mod);
786 
787   x = gimple_build_goto (finlab);
788   gimple_set_location (x, q->location);
789   gimple_seq_add_stmt (&q->repl_stmt, x);
790 }
791 
792 /* Emit a standard landing pad sequence into SEQ for REGION.  */
793 
794 static void
795 emit_post_landing_pad (gimple_seq *seq, eh_region region)
796 {
797   eh_landing_pad lp = region->landing_pads;
798   glabel *x;
799 
800   if (lp == NULL)
801     lp = gen_eh_landing_pad (region);
802 
803   lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
804   EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;
805 
806   x = gimple_build_label (lp->post_landing_pad);
807   gimple_seq_add_stmt (seq, x);
808 }
809 
810 /* Emit a RESX statement into SEQ for REGION.  */
811 
812 static void
813 emit_resx (gimple_seq *seq, eh_region region)
814 {
815   gresx *x = gimple_build_resx (region->index);
816   gimple_seq_add_stmt (seq, x);
817   if (region->outer)
818     record_stmt_eh_region (region->outer, x);
819 }
820 
821 /* Note that the current EH region may contain a throw, or a
822    call to a function which itself may contain a throw.  */
823 
824 static void
825 note_eh_region_may_contain_throw (eh_region region)
826 {
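  /* bitmap_set_bit returns true only when the bit was not already set,
     so this loop marks REGION and each enclosing region, stopping at
     one already marked or at a MUST_NOT_THROW barrier.  */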
827   while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
828     {
829       if (region->type == ERT_MUST_NOT_THROW)
830 	break;
831       region = region->outer;
832       if (region == NULL)
833 	break;
834     }
835 }
836 
837 /* Check if REGION has been marked as containing a throw.  If REGION is
838    NULL, this predicate is false.  */
839 
840 static inline bool
841 eh_region_may_contain_throw (eh_region r)
842 {
843   return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
844 }
845 
846 /* We want to transform
847 	try { body; } catch { stuff; }
848    to
849 	normal_sequence:
850 	  body;
851 	  over:
852 	eh_sequence:
853 	  landing_pad:
854 	  stuff;
855 	  goto over;
856 
857    TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
858    should be placed before the second operand, or NULL.  OVER is
859    an existing label that should be put at the exit, or NULL.  */
860 
861 static gimple_seq
862 frob_into_branch_around (gtry *tp, eh_region region, tree over)
863 {
864   gimple *x;
865   gimple_seq cleanup, result;
866   location_t loc = gimple_location (tp);
867 
868   cleanup = gimple_try_cleanup (tp);
869   result = gimple_try_eval (tp);
870 
871   if (region)
872     emit_post_landing_pad (&eh_seq, region);
873 
874   if (gimple_seq_may_fallthru (cleanup))
875     {
876       if (!over)
877 	over = create_artificial_label (loc);
878       x = gimple_build_goto (over);
879       gimple_set_location (x, loc);
880       gimple_seq_add_stmt (&cleanup, x);
881     }
882   gimple_seq_add_seq (&eh_seq, cleanup);
883 
884   if (over)
885     {
886       x = gimple_build_label (over);
887       gimple_seq_add_stmt (&result, x);
888     }
889   return result;
890 }
891 
892 /* A subroutine of lower_try_finally.  Duplicate the sequence SEQ.
893    Make sure to record all new labels found.  */
894 
895 static gimple_seq
896 lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
897 			     location_t loc)
898 {
899   gtry *region = NULL;
900   gimple_seq new_seq;
901   gimple_stmt_iterator gsi;
902 
903   new_seq = copy_gimple_seq_and_replace_locals (seq);
904 
905   for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
906     {
907       gimple *stmt = gsi_stmt (gsi);
908       /* We duplicate __builtin_stack_restore at -O0 in the hope of eliminating
909 	 it on the EH paths.  When it is not eliminated, make it transparent in
910 	 the debug info.  */
911       if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
912 	gimple_set_location (stmt, UNKNOWN_LOCATION);
913       else if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
914 	{
915 	  tree block = gimple_block (stmt);
916 	  gimple_set_location (stmt, loc);
917 	  gimple_set_block (stmt, block);
918 	}
919     }
920 
921   if (outer_state->tf)
922     region = outer_state->tf->try_finally_expr;
923   collect_finally_tree_1 (new_seq, region);
924 
925   return new_seq;
926 }
927 
928 /* A subroutine of lower_try_finally.  Create a fallthru label for
929    the given try_finally state.  The only tricky bit here is that
930    we have to make sure to record the label in our outer context.  */
931 
932 static tree
933 lower_try_finally_fallthru_label (struct leh_tf_state *tf)
934 {
935   tree label = tf->fallthru_label;
936   treemple temp;
937 
938   if (!label)
939     {
940       label = create_artificial_label (gimple_location (tf->try_finally_expr));
941       tf->fallthru_label = label;
942       if (tf->outer->tf)
943         {
944           temp.t = label;
945           record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
946         }
947     }
948   return label;
949 }
950 
951 /* A subroutine of lower_try_finally.  If FINALLY consists of a
952    GIMPLE_EH_ELSE node, return it.  */
953 
954 static inline geh_else *
955 get_eh_else (gimple_seq finally)
956 {
957   gimple *x = gimple_seq_first_stmt (finally);
958   if (gimple_code (x) == GIMPLE_EH_ELSE)
959     {
960       gcc_assert (gimple_seq_singleton_p (finally));
961       return as_a <geh_else *> (x);
962     }
963   return NULL;
964 }
965 
966 /* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
967    langhook returns non-null, then the language requires that the exception
968    path out of a try_finally be treated specially.  To wit: the code within
969    the finally block may not itself throw an exception.  We have two choices
970    here. First we can duplicate the finally block and wrap it in a
971    must_not_throw region.  Second, we can generate code like
972 
973 	try {
974 	  finally_block;
975 	} catch {
976 	  if (fintmp == eh_edge)
977 	    protect_cleanup_actions;
978 	}
979 
980    where "fintmp" is the temporary used in the switch statement generation
981    alternative considered below.  For the nonce, we always choose the first
982    option.
983 
984    THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */
985 
986 static void
987 honor_protect_cleanup_actions (struct leh_state *outer_state,
988 			       struct leh_state *this_state,
989 			       struct leh_tf_state *tf)
990 {
991   gimple_seq finally = gimple_try_cleanup (tf->top_p);
992 
993   /* EH_ELSE doesn't come from user code; only compiler generated stuff.
994      It does need to be handled here, so as to separate the (different)
995      EH path from the normal path.  But we should not attempt to wrap
996      it with a must-not-throw node (which indeed gets in the way).  */
997   if (geh_else *eh_else = get_eh_else (finally))
998     {
999       gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
1000       finally = gimple_eh_else_e_body (eh_else);
1001 
1002       /* Let the ELSE see the exception that's being processed, but
1003 	 since the cleanup is outside the try block, process it with
1004 	 outer_state, otherwise it may be used as a cleanup for
1005 	 itself, and Bad Things (TM) ensue.  */
1006       eh_region save_ehp = outer_state->ehp_region;
1007       outer_state->ehp_region = this_state->cur_region;
1008       lower_eh_constructs_1 (outer_state, &finally);
1009       outer_state->ehp_region = save_ehp;
1010     }
1011   else
1012     {
1013       /* First check for nothing to do.  */
1014       if (lang_hooks.eh_protect_cleanup_actions == NULL)
1015 	return;
1016       tree actions = lang_hooks.eh_protect_cleanup_actions ();
1017       if (actions == NULL)
1018 	return;
1019 
1020       if (this_state)
1021 	finally = lower_try_finally_dup_block (finally, outer_state,
1022 	  gimple_location (tf->try_finally_expr));
1023 
1024       /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
1025 	 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
1026 	 to be in an enclosing scope, but needs to be implemented at this level
1027 	 to avoid a nesting violation (see wrap_temporary_cleanups in
1028 	 cp/decl.c).  Since it's logically at an outer level, we should call
1029 	 terminate before we get to it, so strip it away before adding the
1030 	 MUST_NOT_THROW filter.  */
1031       gimple_stmt_iterator gsi = gsi_start (finally);
1032       gimple *x = gsi_stmt (gsi);
1033       if (gimple_code (x) == GIMPLE_TRY
1034 	  && gimple_try_kind (x) == GIMPLE_TRY_CATCH
1035 	  && gimple_try_catch_is_cleanup (x))
1036 	{
1037 	  gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
1038 	  gsi_remove (&gsi, false);
1039 	}
1040 
1041       /* Wrap the block with protect_cleanup_actions as the action.  */
1042       geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
1043       gtry *try_stmt = gimple_build_try (finally,
1044 					 gimple_seq_alloc_with_stmt (eh_mnt),
1045 					 GIMPLE_TRY_CATCH);
1046       finally = lower_eh_must_not_throw (outer_state, try_stmt);
1047     }
1048 
1049   /* Drop all of this into the exception sequence.  */
1050   emit_post_landing_pad (&eh_seq, tf->region);
1051   gimple_seq_add_seq (&eh_seq, finally);
1052   if (gimple_seq_may_fallthru (finally))
1053     emit_resx (&eh_seq, tf->region);
1054 
1055   /* Having now been handled, EH isn't to be considered with
1056      the rest of the outgoing edges.  */
1057   tf->may_throw = false;
1058 }
1059 
1060 /* A subroutine of lower_try_finally.  We have determined that there is
1061    no fallthru edge out of the finally block.  This means that there is
1062    no outgoing edge corresponding to any incoming edge.  Restructure the
1063    try_finally node for this special case.  */
1064 
1065 static void
1066 lower_try_finally_nofallthru (struct leh_state *state,
1067 			      struct leh_tf_state *tf)
1068 {
1069   tree lab;
1070   gimple *x;
1071   geh_else *eh_else;
1072   gimple_seq finally;
1073   struct goto_queue_node *q, *qe;
1074 
1075   lab = create_artificial_label (gimple_location (tf->try_finally_expr));
1076 
1077   /* We expect that tf->top_p is a GIMPLE_TRY. */
1078   finally = gimple_try_cleanup (tf->top_p);
1079   tf->top_p_seq = gimple_try_eval (tf->top_p);
1080 
1081   x = gimple_build_label (lab);
1082   gimple_seq_add_stmt (&tf->top_p_seq, x);
1083 
1084   q = tf->goto_queue;
1085   qe = q + tf->goto_queue_active;
1086   for (; q < qe; ++q)
1087     if (q->index < 0)
1088       do_return_redirection (q, lab, NULL);
1089     else
1090       do_goto_redirection (q, lab, NULL, tf);
1091 
1092   replace_goto_queue (tf);
1093 
1094   /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
1095   eh_else = get_eh_else (finally);
1096   if (eh_else)
1097     {
1098       finally = gimple_eh_else_n_body (eh_else);
1099       lower_eh_constructs_1 (state, &finally);
1100       gimple_seq_add_seq (&tf->top_p_seq, finally);
1101 
1102       if (tf->may_throw)
1103 	{
1104 	  finally = gimple_eh_else_e_body (eh_else);
1105 	  lower_eh_constructs_1 (state, &finally);
1106 
1107 	  emit_post_landing_pad (&eh_seq, tf->region);
1108 	  gimple_seq_add_seq (&eh_seq, finally);
1109 	}
1110     }
1111   else
1112     {
1113       lower_eh_constructs_1 (state, &finally);
1114       gimple_seq_add_seq (&tf->top_p_seq, finally);
1115 
1116       if (tf->may_throw)
1117 	{
1118 	  emit_post_landing_pad (&eh_seq, tf->region);
1119 
1120 	  x = gimple_build_goto (lab);
1121 	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
1122 	  gimple_seq_add_stmt (&eh_seq, x);
1123 	}
1124     }
1125 }
1126 
1127 /* A subroutine of lower_try_finally.  We have determined that there is
1128    exactly one destination of the finally block.  Restructure the
1129    try_finally node for this special case.  */
1130 
1131 static void
1132 lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
1133 {
1134   struct goto_queue_node *q, *qe;
1135   geh_else *eh_else;
1136   glabel *label_stmt;
1137   gimple *x;
1138   gimple_seq finally;
1139   gimple_stmt_iterator gsi;
1140   tree finally_label;
1141   location_t loc = gimple_location (tf->try_finally_expr);
1142 
1143   finally = gimple_try_cleanup (tf->top_p);
1144   tf->top_p_seq = gimple_try_eval (tf->top_p);
1145 
1146   /* Since there's only one destination, and that destination edge is
1147      either EH or non-EH, all of our incoming edges are of the same
1148      type.  Therefore we can lower EH_ELSE immediately.  */
1149   eh_else = get_eh_else (finally);
1150   if (eh_else)
1151     {
1152       if (tf->may_throw)
1153 	finally = gimple_eh_else_e_body (eh_else);
1154       else
1155 	finally = gimple_eh_else_n_body (eh_else);
1156     }
1157 
1158   lower_eh_constructs_1 (state, &finally);
1159 
1160   for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1161     {
1162       gimple *stmt = gsi_stmt (gsi);
1163       if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
1164 	{
1165 	  tree block = gimple_block (stmt);
1166 	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
1167 	  gimple_set_block (stmt, block);
1168 	}
1169     }
1170 
1171   if (tf->may_throw)
1172     {
1173       /* Only reachable via the exception edge.  Add the given label to
1174          the head of the FINALLY block.  Append a RESX at the end.  */
1175       emit_post_landing_pad (&eh_seq, tf->region);
1176       gimple_seq_add_seq (&eh_seq, finally);
1177       emit_resx (&eh_seq, tf->region);
1178       return;
1179     }
1180 
1181   if (tf->may_fallthru)
1182     {
1183       /* Only reachable via the fallthru edge.  Do nothing but let
1184 	 the two blocks run together; we'll fall out the bottom.  */
1185       gimple_seq_add_seq (&tf->top_p_seq, finally);
1186       return;
1187     }
1188 
1189   finally_label = create_artificial_label (loc);
1190   label_stmt = gimple_build_label (finally_label);
1191   gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);
1192 
1193   gimple_seq_add_seq (&tf->top_p_seq, finally);
1194 
1195   q = tf->goto_queue;
1196   qe = q + tf->goto_queue_active;
1197 
1198   if (tf->may_return)
1199     {
1200       /* Reachable by return expressions only.  Redirect them.  */
1201       for (; q < qe; ++q)
1202 	do_return_redirection (q, finally_label, NULL);
1203       replace_goto_queue (tf);
1204     }
1205   else
1206     {
1207       /* Reachable by goto expressions only.  Redirect them.  */
1208       for (; q < qe; ++q)
1209 	do_goto_redirection (q, finally_label, NULL, tf);
1210       replace_goto_queue (tf);
1211 
1212       if (tf->dest_array[0] == tf->fallthru_label)
1213 	{
1214 	  /* Reachable by goto to fallthru label only.  Redirect it
1215 	     to the new label (already created, sadly), and do not
1216 	     emit the final branch out, or the fallthru label.  */
1217 	  tf->fallthru_label = NULL;
1218 	  return;
1219 	}
1220     }
1221 
1222   /* Place the original return/goto to the original destination
1223      immediately after the finally block. */
1224   x = tf->goto_queue[0].cont_stmt;
1225   gimple_seq_add_stmt (&tf->top_p_seq, x);
1226   maybe_record_in_goto_queue (state, x);
1227 }
1228 
1229 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1230    and outgoing from the finally block.  Implement this by duplicating the
1231    finally block for every destination.  */
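
/* An illustrative sketch of the copy strategy (not part of the
   original file).  Roughly,

	try { ... goto out; ... return x; } finally { F; }

   becomes

	... goto L1; ... goto L2; ...
	L1: F; goto out;
	L2: F; return x;
	F;			-- fallthru copy, if the try body
				   may fall through

   i.e. one duplicate of F per distinct destination.  */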
1232 
1233 static void
1234 lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
1235 {
1236   gimple_seq finally;
1237   gimple_seq new_stmt;
1238   gimple_seq seq;
1239   gimple *x;
1240   geh_else *eh_else;
1241   tree tmp;
1242   location_t tf_loc = gimple_location (tf->try_finally_expr);
1243 
1244   finally = gimple_try_cleanup (tf->top_p);
1245 
1246   /* Notice EH_ELSE, and simplify some of the remaining code
1247      by considering FINALLY to be the normal return path only.  */
1248   eh_else = get_eh_else (finally);
1249   if (eh_else)
1250     finally = gimple_eh_else_n_body (eh_else);
1251 
1252   tf->top_p_seq = gimple_try_eval (tf->top_p);
1253   new_stmt = NULL;
1254 
1255   if (tf->may_fallthru)
1256     {
1257       seq = lower_try_finally_dup_block (finally, state, tf_loc);
1258       lower_eh_constructs_1 (state, &seq);
1259       gimple_seq_add_seq (&new_stmt, seq);
1260 
1261       tmp = lower_try_finally_fallthru_label (tf);
1262       x = gimple_build_goto (tmp);
1263       gimple_set_location (x, tf_loc);
1264       gimple_seq_add_stmt (&new_stmt, x);
1265     }
1266 
1267   if (tf->may_throw)
1268     {
1269       /* We don't need to copy the EH path of EH_ELSE,
1270 	 since it is only emitted once.  */
1271       if (eh_else)
1272 	seq = gimple_eh_else_e_body (eh_else);
1273       else
1274 	seq = lower_try_finally_dup_block (finally, state, tf_loc);
1275       lower_eh_constructs_1 (state, &seq);
1276 
1277       emit_post_landing_pad (&eh_seq, tf->region);
1278       gimple_seq_add_seq (&eh_seq, seq);
1279       emit_resx (&eh_seq, tf->region);
1280     }
1281 
1282   if (tf->goto_queue)
1283     {
1284       struct goto_queue_node *q, *qe;
1285       int return_index, index;
1286       struct labels_s
1287       {
1288 	struct goto_queue_node *q;
1289 	tree label;
1290       } *labels;
1291 
1292       return_index = tf->dest_array.length ();
1293       labels = XCNEWVEC (struct labels_s, return_index + 1);
1294 
1295       q = tf->goto_queue;
1296       qe = q + tf->goto_queue_active;
1297       for (; q < qe; q++)
1298 	{
1299 	  index = q->index < 0 ? return_index : q->index;
1300 
1301 	  if (!labels[index].q)
1302 	    labels[index].q = q;
1303 	}
1304 
1305       for (index = 0; index < return_index + 1; index++)
1306 	{
1307 	  tree lab;
1308 
1309 	  q = labels[index].q;
1310 	  if (! q)
1311 	    continue;
1312 
1313 	  lab = labels[index].label
1314 	    = create_artificial_label (tf_loc);
1315 
1316 	  if (index == return_index)
1317 	    do_return_redirection (q, lab, NULL);
1318 	  else
1319 	    do_goto_redirection (q, lab, NULL, tf);
1320 
1321 	  x = gimple_build_label (lab);
1322           gimple_seq_add_stmt (&new_stmt, x);
1323 
1324 	  seq = lower_try_finally_dup_block (finally, state, q->location);
1325 	  lower_eh_constructs_1 (state, &seq);
1326           gimple_seq_add_seq (&new_stmt, seq);
1327 
1328           gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
1329 	  maybe_record_in_goto_queue (state, q->cont_stmt);
1330 	}
1331 
1332       for (q = tf->goto_queue; q < qe; q++)
1333 	{
1334 	  tree lab;
1335 
1336 	  index = q->index < 0 ? return_index : q->index;
1337 
1338 	  if (labels[index].q == q)
1339 	    continue;
1340 
1341 	  lab = labels[index].label;
1342 
1343 	  if (index == return_index)
1344 	    do_return_redirection (q, lab, NULL);
1345 	  else
1346 	    do_goto_redirection (q, lab, NULL, tf);
1347 	}
1348 
1349       replace_goto_queue (tf);
1350       free (labels);
1351     }
1352 
1353   /* The new stmts are linked in only after running replace_goto_queue,
1354      so that the same goto stmts are not processed twice.  */
1355   gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
1356 }
1357 
1358 /* A subroutine of lower_try_finally.  There are multiple edges incoming
1359    and outgoing from the finally block.  Implement this by instrumenting
1360    each incoming edge and creating a switch statement at the end of the
1361    finally block that branches to the appropriate destination.  */
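
/* An illustrative sketch of the switch strategy (not part of the
   original file).  Roughly,

	try { ... goto out; ... return x; } finally { F; }

   becomes

	... finally_tmp = 0; goto finally_label; ...
	... finally_tmp = 1; goto finally_label; ...
	finally_label:
	  F;
	  switch (finally_tmp)
	    {
	    case 0: goto out;
	    case 1: return x;
	    }

   i.e. a single copy of F plus a dispatch on the incoming edge.  */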
1362 
1363 static void
1364 lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
1365 {
1366   struct goto_queue_node *q, *qe;
1367   tree finally_tmp, finally_label;
1368   int return_index, eh_index, fallthru_index;
1369   int nlabels, ndests, j, last_case_index;
1370   tree last_case;
1371   auto_vec<tree> case_label_vec;
1372   gimple_seq switch_body = NULL;
1373   gimple *x;
1374   geh_else *eh_else;
1375   tree tmp;
1376   gimple *switch_stmt;
1377   gimple_seq finally;
1378   hash_map<tree, gimple *> *cont_map = NULL;
1379   /* The location of the TRY_FINALLY stmt.  */
1380   location_t tf_loc = gimple_location (tf->try_finally_expr);
1381   /* The location of the finally block.  */
1382   location_t finally_loc;
1383 
1384   finally = gimple_try_cleanup (tf->top_p);
1385   eh_else = get_eh_else (finally);
1386 
1387   /* Mash the TRY block to the head of the chain.  */
1388   tf->top_p_seq = gimple_try_eval (tf->top_p);
1389 
1390   /* The location of the finally is either the last stmt in the finally
1391      block or the location of the TRY_FINALLY itself.  */
1392   x = gimple_seq_last_stmt (finally);
1393   finally_loc = x ? gimple_location (x) : tf_loc;
1394 
1395   /* Prepare for switch statement generation.  */
1396   nlabels = tf->dest_array.length ();
1397   return_index = nlabels;
1398   eh_index = return_index + tf->may_return;
1399   fallthru_index = eh_index + (tf->may_throw && !eh_else);
1400   ndests = fallthru_index + tf->may_fallthru;
1401 
1402   finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
1403   finally_label = create_artificial_label (finally_loc);
1404 
1405   /* We use vec::quick_push on case_label_vec throughout this function,
1406      since we know the size in advance and allocate precisely as much
1407      space as needed.  */
1408   case_label_vec.create (ndests);
1409   last_case = NULL;
1410   last_case_index = 0;
1411 
1412   /* Begin inserting code for getting to the finally block.  Things
1413      are done in this order to match the sequence in which the code is
1414      laid out.  */
1415 
1416   if (tf->may_fallthru)
1417     {
1418       x = gimple_build_assign (finally_tmp,
1419 			       build_int_cst (integer_type_node,
1420 					      fallthru_index));
1421       gimple_set_location (x, finally_loc);
1422       gimple_seq_add_stmt (&tf->top_p_seq, x);
1423 
1424       tmp = build_int_cst (integer_type_node, fallthru_index);
1425       last_case = build_case_label (tmp, NULL,
1426 				    create_artificial_label (finally_loc));
1427       case_label_vec.quick_push (last_case);
1428       last_case_index++;
1429 
1430       x = gimple_build_label (CASE_LABEL (last_case));
1431       gimple_seq_add_stmt (&switch_body, x);
1432 
1433       tmp = lower_try_finally_fallthru_label (tf);
1434       x = gimple_build_goto (tmp);
1435       gimple_set_location (x, finally_loc);
1436       gimple_seq_add_stmt (&switch_body, x);
1437     }
1438 
1439   /* For EH_ELSE, emit the exception path (plus resx) now, then
1440      subsequently we need only consider the normal path.  */
1441   if (eh_else)
1442     {
1443       if (tf->may_throw)
1444 	{
1445 	  finally = gimple_eh_else_e_body (eh_else);
1446 	  lower_eh_constructs_1 (state, &finally);
1447 
1448 	  emit_post_landing_pad (&eh_seq, tf->region);
1449 	  gimple_seq_add_seq (&eh_seq, finally);
1450 	  emit_resx (&eh_seq, tf->region);
1451 	}
1452 
1453       finally = gimple_eh_else_n_body (eh_else);
1454     }
1455   else if (tf->may_throw)
1456     {
1457       emit_post_landing_pad (&eh_seq, tf->region);
1458 
1459       x = gimple_build_assign (finally_tmp,
1460 			       build_int_cst (integer_type_node, eh_index));
1461       gimple_seq_add_stmt (&eh_seq, x);
1462 
1463       x = gimple_build_goto (finally_label);
1464       gimple_set_location (x, tf_loc);
1465       gimple_seq_add_stmt (&eh_seq, x);
1466 
1467       tmp = build_int_cst (integer_type_node, eh_index);
1468       last_case = build_case_label (tmp, NULL,
1469 				    create_artificial_label (tf_loc));
1470       case_label_vec.quick_push (last_case);
1471       last_case_index++;
1472 
1473       x = gimple_build_label (CASE_LABEL (last_case));
1474       gimple_seq_add_stmt (&eh_seq, x);
1475       emit_resx (&eh_seq, tf->region);
1476     }
1477 
1478   x = gimple_build_label (finally_label);
1479   gimple_seq_add_stmt (&tf->top_p_seq, x);
1480 
1481   lower_eh_constructs_1 (state, &finally);
1482   gimple_seq_add_seq (&tf->top_p_seq, finally);
1483 
1484   /* Redirect each incoming goto edge.  */
1485   q = tf->goto_queue;
1486   qe = q + tf->goto_queue_active;
1487   j = last_case_index + tf->may_return;
1488   /* Prepare the assignments to finally_tmp that are executed upon the
1489      entry through a particular edge. */
1490   for (; q < qe; ++q)
1491     {
1492       gimple_seq mod = NULL;
1493       int switch_id;
1494       unsigned int case_index;
1495 
1496       if (q->index < 0)
1497 	{
1498 	  x = gimple_build_assign (finally_tmp,
1499 				   build_int_cst (integer_type_node,
1500 						  return_index));
1501 	  gimple_seq_add_stmt (&mod, x);
1502 	  do_return_redirection (q, finally_label, mod);
1503 	  switch_id = return_index;
1504 	}
1505       else
1506 	{
1507 	  x = gimple_build_assign (finally_tmp,
1508 				   build_int_cst (integer_type_node, q->index));
1509 	  gimple_seq_add_stmt (&mod, x);
1510 	  do_goto_redirection (q, finally_label, mod, tf);
1511 	  switch_id = q->index;
1512 	}
1513 
1514       case_index = j + q->index;
1515       if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
1516         {
1517           tree case_lab;
1518 	  tmp = build_int_cst (integer_type_node, switch_id);
1519           case_lab = build_case_label (tmp, NULL,
1520 				       create_artificial_label (tf_loc));
1521           /* We store the cont_stmt in the pointer map, so that we can recover
1522              it in the loop below.  */
1523           if (!cont_map)
1524 	    cont_map = new hash_map<tree, gimple *>;
1525           cont_map->put (case_lab, q->cont_stmt);
1526           case_label_vec.quick_push (case_lab);
1527         }
1528     }
1529   for (j = last_case_index; j < last_case_index + nlabels; j++)
1530     {
1531       gimple *cont_stmt;
1532 
1533       last_case = case_label_vec[j];
1534 
1535       gcc_assert (last_case);
1536       gcc_assert (cont_map);
1537 
1538       cont_stmt = *cont_map->get (last_case);
1539 
1540       x = gimple_build_label (CASE_LABEL (last_case));
1541       gimple_seq_add_stmt (&switch_body, x);
1542       gimple_seq_add_stmt (&switch_body, cont_stmt);
1543       maybe_record_in_goto_queue (state, cont_stmt);
1544     }
1545   if (cont_map)
1546     delete cont_map;
1547 
1548   replace_goto_queue (tf);
1549 
1550   /* Make sure that the last case is the default label, as one is required.
1551      Then sort the labels, which is also required in GIMPLE.  */
1552   CASE_LOW (last_case) = NULL;
1553   tree tem = case_label_vec.pop ();
1554   gcc_assert (tem == last_case);
1555   sort_case_labels (case_label_vec);
1556 
1557   /* Build the switch statement, setting last_case to be the default
1558      label.  */
1559   switch_stmt = gimple_build_switch (finally_tmp, last_case,
1560 				     case_label_vec);
1561   gimple_set_location (switch_stmt, finally_loc);
1562 
1563   /* SWITCH_STMT is linked in only after running replace_goto_queue,
1564      so that the same goto stmts are not processed twice.  */
1565   gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
1566   gimple_seq_add_seq (&tf->top_p_seq, switch_body);
1567 }
1568 
1569 /* Decide whether or not we are going to duplicate the finally block.
1570    There are several considerations.
1571 
1572    Chiefly, we'd like to prevent egregious code growth.  One way to
1573    do this is to estimate the size of the finally block, multiply
1574    that by the number of copies we'd need to make, and compare against
1575    the estimate of the size of the switch machinery we'd have to add.  */
1576 
1577 static bool
1578 decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
1579 {
1580   int f_estimate, sw_estimate;
1581   geh_else *eh_else;
1582 
1583   /* If there's an EH_ELSE involved, the exception path is separate
1584      and really doesn't come into play for this computation.  */
1585   eh_else = get_eh_else (finally);
1586   if (eh_else)
1587     {
1588       ndests -= may_throw;
1589       finally = gimple_eh_else_n_body (eh_else);
1590     }
1591 
1592   if (!optimize)
1593     {
1594       gimple_stmt_iterator gsi;
1595 
1596       if (ndests == 1)
1597         return true;
1598 
1599       for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
1600 	{
1601 	  /* Duplicate __builtin_stack_restore in the hope of eliminating it
1602 	     on the EH paths and, consequently, useless cleanups.  */
1603 	  gimple *stmt = gsi_stmt (gsi);
1604 	  if (!is_gimple_debug (stmt)
1605 	      && !gimple_clobber_p (stmt)
1606 	      && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
1607 	    return false;
1608 	}
1609       return true;
1610     }
1611 
1612   /* Duplicating: the finally block, estimated N times, plus N gotos.  */
1613   f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
1614   f_estimate = (f_estimate + 1) * ndests;
1615 
1616   /* Switch statement (cost 10), N variable assignments, N gotos.  */
1617   sw_estimate = 10 + 2 * ndests;
1618 
1619   /* Optimize for size clearly wants our best guess.  */
1620   if (optimize_function_for_size_p (cfun))
1621     return f_estimate < sw_estimate;
1622 
1623   /* ??? These numbers are completely made up so far.  */
1624   if (optimize > 1)
1625     return f_estimate < 100 || f_estimate < sw_estimate * 2;
1626   else
1627     return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
1628 }
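
/* A worked example of the heuristic above (numbers illustrative):
   a finally block estimated at 9 insns with 3 destinations gives
   f_estimate = (9 + 1) * 3 = 30 and sw_estimate = 10 + 2 * 3 = 16.
   When optimizing for size we use the switch (30 >= 16); at -O2 we
   still copy, since 30 < 100.  */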
1629 
1630 /* STATE's current region is the enclosing region for a possible
1631    cleanup region, or the region itself.  Returns TRUE if such a
1632    region would be unreachable.
1633 
1634    Cleanup regions within a must-not-throw region aren't actually reachable
1635    even if there are throwing stmts within them, because the personality
1636    routine will call terminate before unwinding.  */
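
/* For instance (an illustrative C++ sketch, not from the original
   sources): in

     void f () noexcept { Obj o; g (); }

   the cleanup running O's destructor sits inside the implicit
   must-not-throw region of the noexcept function, so if g () throws,
   the personality routine calls terminate and the cleanup never runs
   on an unwind path.  */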
1637 
1638 static bool
cleanup_is_dead_in(leh_state * state)1639 cleanup_is_dead_in (leh_state *state)
1640 {
1641   if (flag_checking)
1642     {
1643       eh_region reg = state->cur_region;
1644       while (reg && reg->type == ERT_CLEANUP)
1645 	reg = reg->outer;
1646 
1647       gcc_assert (reg == state->outer_non_cleanup);
1648     }
1649 
1650   eh_region reg = state->outer_non_cleanup;
1651   return (reg && reg->type == ERT_MUST_NOT_THROW);
1652 }
1653 
1654 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
1655    to a sequence of labels and blocks, plus the exception region trees
1656    that record all the magic.  This is complicated by the need to
1657    arrange for the FINALLY block to be executed on all exits.  */
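
/* An illustrative recap of the four strategies chosen further below:
   with no reachable destination the finally block is simply dropped;
   if it cannot fall through it is wired in directly
   (lower_try_finally_nofallthru); with a single destination it is
   emitted once (lower_try_finally_onedest); otherwise it is either
   duplicated per destination (lower_try_finally_copy) or reached via a
   switch on a destination index (lower_try_finally_switch), whichever
   decide_copy_try_finally estimates to be cheaper.  */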
1658 
1659 static gimple_seq
lower_try_finally(struct leh_state * state,gtry * tp)1660 lower_try_finally (struct leh_state *state, gtry *tp)
1661 {
1662   struct leh_tf_state this_tf;
1663   struct leh_state this_state;
1664   int ndests;
1665   gimple_seq old_eh_seq;
1666 
1667   /* Process the try block.  */
1668 
1669   memset (&this_tf, 0, sizeof (this_tf));
1670   this_tf.try_finally_expr = tp;
1671   this_tf.top_p = tp;
1672   this_tf.outer = state;
1673   if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state))
1674     {
1675       this_tf.region = gen_eh_region_cleanup (state->cur_region);
1676       this_state.cur_region = this_tf.region;
1677     }
1678   else
1679     {
1680       this_tf.region = NULL;
1681       this_state.cur_region = state->cur_region;
1682     }
1683 
1684   this_state.outer_non_cleanup = state->outer_non_cleanup;
1685   this_state.ehp_region = state->ehp_region;
1686   this_state.tf = &this_tf;
1687 
1688   old_eh_seq = eh_seq;
1689   eh_seq = NULL;
1690 
1691   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1692 
1693   /* Determine if the try block can fall through at the bottom.  */
1694   this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1695 
1696   /* Determine if any exceptions are possible within the try block.  */
1697   if (this_tf.region)
1698     this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1699   if (this_tf.may_throw)
1700     honor_protect_cleanup_actions (state, &this_state, &this_tf);
1701 
1702   /* Determine how many edges (still) reach the finally block.  Or rather,
1703      how many destinations are reached by the finally block.  Use this to
1704      determine how we process the finally block itself.  */
1705 
1706   ndests = this_tf.dest_array.length ();
1707   ndests += this_tf.may_fallthru;
1708   ndests += this_tf.may_return;
1709   ndests += this_tf.may_throw;
1710 
1711   /* If the FINALLY block is not reachable, dike it out.  */
1712   if (ndests == 0)
1713     {
1714       gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1715       gimple_try_set_cleanup (tp, NULL);
1716     }
1717   /* If the finally block doesn't fall through, then any destination
1718      we might try to impose there isn't reached either.  There may be
1719      some minor amount of cleanup and redirection still needed.  */
1720   else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1721     lower_try_finally_nofallthru (state, &this_tf);
1722 
1723   /* We can easily special-case redirection to a single destination.  */
1724   else if (ndests == 1)
1725     lower_try_finally_onedest (state, &this_tf);
1726   else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1727 				    gimple_try_cleanup (tp)))
1728     lower_try_finally_copy (state, &this_tf);
1729   else
1730     lower_try_finally_switch (state, &this_tf);
1731 
1732   /* If someone requested we add a label at the end of the transformed
1733      block, do so.  */
1734   if (this_tf.fallthru_label)
1735     {
1736       /* This must be reached only if ndests == 0. */
1737       gimple *x = gimple_build_label (this_tf.fallthru_label);
1738       gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1739     }
1740 
1741   this_tf.dest_array.release ();
1742   free (this_tf.goto_queue);
1743   if (this_tf.goto_queue_map)
1744     delete this_tf.goto_queue_map;
1745 
1746   /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1747      If there was no old eh_seq, then the append is trivially already done.  */
1748   if (old_eh_seq)
1749     {
1750       if (eh_seq == NULL)
1751 	eh_seq = old_eh_seq;
1752       else
1753 	{
1754 	  gimple_seq new_eh_seq = eh_seq;
1755 	  eh_seq = old_eh_seq;
1756 	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1757 	}
1758     }
1759 
1760   return this_tf.top_p_seq;
1761 }
1762 
1763 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1764    list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1765    exception region trees that record all the magic.  */
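
/* A simplified sketch of the resulting shape (illustrative only; the
   exact wiring is performed by frob_into_branch_around):

       <try body>
       goto over;
     <dispatch>:			;; reached via the EH edge
       eh_dispatch <try region>;
       resx;				;; no handler matched
     <label_1>: <handler_1> goto over;
       ...
     over:
   */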
1766 
1767 static gimple_seq
lower_catch(struct leh_state * state,gtry * tp)1768 lower_catch (struct leh_state *state, gtry *tp)
1769 {
1770   eh_region try_region = NULL;
1771   struct leh_state this_state = *state;
1772   gimple_stmt_iterator gsi;
1773   tree out_label;
1774   gimple_seq new_seq, cleanup;
1775   gimple *x;
1776   geh_dispatch *eh_dispatch;
1777   location_t try_catch_loc = gimple_location (tp);
1778   location_t catch_loc = UNKNOWN_LOCATION;
1779 
1780   if (flag_exceptions)
1781     {
1782       try_region = gen_eh_region_try (state->cur_region);
1783       this_state.cur_region = try_region;
1784       this_state.outer_non_cleanup = this_state.cur_region;
1785     }
1786 
1787   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1788 
1789   if (!eh_region_may_contain_throw (try_region))
1790     return gimple_try_eval (tp);
1791 
1792   new_seq = NULL;
1793   eh_dispatch = gimple_build_eh_dispatch (try_region->index);
1794   gimple_seq_add_stmt (&new_seq, eh_dispatch);
1795   emit_resx (&new_seq, try_region);
1796 
1797   this_state.cur_region = state->cur_region;
1798   this_state.outer_non_cleanup = state->outer_non_cleanup;
1799   this_state.ehp_region = try_region;
1800 
1801   /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1802      itself, so that e.g. for coverage purposes the nested cleanups don't
1803      appear before the cleanup body.  See PR64634 for details.  */
1804   gimple_seq old_eh_seq = eh_seq;
1805   eh_seq = NULL;
1806 
1807   out_label = NULL;
1808   cleanup = gimple_try_cleanup (tp);
1809   for (gsi = gsi_start (cleanup);
1810        !gsi_end_p (gsi);
1811        gsi_next (&gsi))
1812     {
1813       eh_catch c;
1814       gcatch *catch_stmt;
1815       gimple_seq handler;
1816 
1817       catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1818       if (catch_loc == UNKNOWN_LOCATION)
1819 	catch_loc = gimple_location (catch_stmt);
1820       c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1821 
1822       handler = gimple_catch_handler (catch_stmt);
1823       lower_eh_constructs_1 (&this_state, &handler);
1824 
1825       c->label = create_artificial_label (UNKNOWN_LOCATION);
1826       x = gimple_build_label (c->label);
1827       gimple_seq_add_stmt (&new_seq, x);
1828 
1829       gimple_seq_add_seq (&new_seq, handler);
1830 
1831       if (gimple_seq_may_fallthru (new_seq))
1832 	{
1833 	  if (!out_label)
1834 	    out_label = create_artificial_label (try_catch_loc);
1835 
1836 	  x = gimple_build_goto (out_label);
1837 	  gimple_seq_add_stmt (&new_seq, x);
1838 	}
1839       if (!c->type_list)
1840 	break;
1841     }
1842 
1843   /* Try to set a location on the dispatching construct to avoid inheriting
1844      the location of the previous statement.  */
1845   gimple_set_location (eh_dispatch, catch_loc);
1846 
1847   gimple_try_set_cleanup (tp, new_seq);
1848 
1849   gimple_seq new_eh_seq = eh_seq;
1850   eh_seq = old_eh_seq;
1851   gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1852   gimple_seq_add_seq (&eh_seq, new_eh_seq);
1853   return ret_seq;
1854 }
1855 
1856 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1857    GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1858    region trees that record all the magic.  */
1859 
1860 static gimple_seq
lower_eh_filter(struct leh_state * state,gtry * tp)1861 lower_eh_filter (struct leh_state *state, gtry *tp)
1862 {
1863   struct leh_state this_state = *state;
1864   eh_region this_region = NULL;
1865   gimple *inner, *x;
1866   gimple_seq new_seq;
1867 
1868   inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1869 
1870   if (flag_exceptions)
1871     {
1872       this_region = gen_eh_region_allowed (state->cur_region,
1873 				           gimple_eh_filter_types (inner));
1874       this_state.cur_region = this_region;
1875       this_state.outer_non_cleanup = this_state.cur_region;
1876     }
1877 
1878   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1879 
1880   if (!eh_region_may_contain_throw (this_region))
1881     return gimple_try_eval (tp);
1882 
1883   this_state.cur_region = state->cur_region;
1884   this_state.ehp_region = this_region;
1885 
1886   new_seq = NULL;
1887   x = gimple_build_eh_dispatch (this_region->index);
1888   gimple_set_location (x, gimple_location (tp));
1889   gimple_seq_add_stmt (&new_seq, x);
1890   emit_resx (&new_seq, this_region);
1891 
1892   this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1893   x = gimple_build_label (this_region->u.allowed.label);
1894   gimple_seq_add_stmt (&new_seq, x);
1895 
1896   lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1897   gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1898 
1899   gimple_try_set_cleanup (tp, new_seq);
1900 
1901   return frob_into_branch_around (tp, this_region, NULL);
1902 }
1903 
1904 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1905    a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1906    plus the exception region trees that record all the magic.  */
1907 
1908 static gimple_seq
lower_eh_must_not_throw(struct leh_state * state,gtry * tp)1909 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1910 {
1911   struct leh_state this_state = *state;
1912 
1913   if (flag_exceptions)
1914     {
1915       gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1916       eh_region this_region;
1917 
1918       this_region = gen_eh_region_must_not_throw (state->cur_region);
1919       this_region->u.must_not_throw.failure_decl
1920 	= gimple_eh_must_not_throw_fndecl (
1921 	    as_a <geh_mnt *> (inner));
1922       this_region->u.must_not_throw.failure_loc
1923 	= LOCATION_LOCUS (gimple_location (tp));
1924 
1925       /* In order to get mangling applied to this decl, we must mark it
1926 	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1927 	 needs to happen.  */
1928       TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1929 
1930       this_state.cur_region = this_region;
1931       this_state.outer_non_cleanup = this_state.cur_region;
1932     }
1933 
1934   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1935 
1936   return gimple_try_eval (tp);
1937 }
1938 
1939 /* Implement a cleanup expression.  This is similar to try-finally,
1940    except that we only execute the cleanup block for exception edges.  */
1941 
1942 static gimple_seq
lower_cleanup(struct leh_state * state,gtry * tp)1943 lower_cleanup (struct leh_state *state, gtry *tp)
1944 {
1945   struct leh_state this_state = *state;
1946   eh_region this_region = NULL;
1947   struct leh_tf_state fake_tf;
1948   gimple_seq result;
1949   bool cleanup_dead = cleanup_is_dead_in (state);
1950 
1951   if (flag_exceptions && !cleanup_dead)
1952     {
1953       this_region = gen_eh_region_cleanup (state->cur_region);
1954       this_state.cur_region = this_region;
1955       this_state.outer_non_cleanup = state->outer_non_cleanup;
1956     }
1957 
1958   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1959 
1960   if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1961     return gimple_try_eval (tp);
1962 
1963   /* Build enough of a try-finally state so that we can reuse
1964      honor_protect_cleanup_actions.  */
1965   memset (&fake_tf, 0, sizeof (fake_tf));
1966   fake_tf.top_p = fake_tf.try_finally_expr = tp;
1967   fake_tf.outer = state;
1968   fake_tf.region = this_region;
1969   fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1970   fake_tf.may_throw = true;
1971 
1972   honor_protect_cleanup_actions (state, NULL, &fake_tf);
1973 
1974   if (fake_tf.may_throw)
1975     {
1976       /* In this case honor_protect_cleanup_actions had nothing to do,
1977 	 and we should process this normally.  */
1978       lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1979       result = frob_into_branch_around (tp, this_region,
1980                                         fake_tf.fallthru_label);
1981     }
1982   else
1983     {
1984       /* In this case honor_protect_cleanup_actions did nearly all of
1985 	 the work.  All we have left is to append the fallthru_label.  */
1986 
1987       result = gimple_try_eval (tp);
1988       if (fake_tf.fallthru_label)
1989 	{
1990 	  gimple *x = gimple_build_label (fake_tf.fallthru_label);
1991 	  gimple_seq_add_stmt (&result, x);
1992 	}
1993     }
1994   return result;
1995 }
1996 
1997 /* Main loop for lowering eh constructs. Also moves gsi to the next
1998    statement. */
1999 
2000 static void
lower_eh_constructs_2(struct leh_state * state,gimple_stmt_iterator * gsi)2001 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
2002 {
2003   gimple_seq replace;
2004   gimple *x;
2005   gimple *stmt = gsi_stmt (*gsi);
2006 
2007   switch (gimple_code (stmt))
2008     {
2009     case GIMPLE_CALL:
2010       {
2011 	tree fndecl = gimple_call_fndecl (stmt);
2012 	tree rhs, lhs;
2013 
2014 	if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
2015 	  switch (DECL_FUNCTION_CODE (fndecl))
2016 	    {
2017 	    case BUILT_IN_EH_POINTER:
2018 	      /* The front end may have generated a call to
2019 		 __builtin_eh_pointer (0) within a catch region.  Replace
2020 		 this zero argument with the current catch region number.  */
2021 	      if (state->ehp_region)
2022 		{
2023 		  tree nr = build_int_cst (integer_type_node,
2024 					   state->ehp_region->index);
2025 		  gimple_call_set_arg (stmt, 0, nr);
2026 		}
2027 	      else
2028 		{
2029 		  /* The user has done something silly.  Remove it.  */
2030 		  rhs = null_pointer_node;
2031 		  goto do_replace;
2032 		}
2033 	      break;
2034 
2035 	    case BUILT_IN_EH_FILTER:
2036 	      /* ??? This should never appear, but since it's a builtin it
2037 		 is accessible to abuse by users.  Just remove it and
2038 		 replace the use with the arbitrary value zero.  */
2039 	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2040 	    do_replace:
2041 	      lhs = gimple_call_lhs (stmt);
2042 	      x = gimple_build_assign (lhs, rhs);
2043 	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2044 	      /* FALLTHRU */
2045 
2046 	    case BUILT_IN_EH_COPY_VALUES:
2047 	      /* Likewise this should not appear.  Remove it.  */
2048 	      gsi_remove (gsi, true);
2049 	      return;
2050 
2051 	    default:
2052 	      break;
2053 	    }
2054       }
2055       /* FALLTHRU */
2056 
2057     case GIMPLE_ASSIGN:
2058       /* If the stmt can throw, use a new temporary for the assignment
2059          to a LHS.  This makes sure the old value of the LHS is
2060 	 available on the EH edge.  Only do so for statements that
2061 	 potentially fall through (no noreturn calls e.g.), otherwise
2062 	 this new assignment might create fake fallthru regions.  */
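      /* For example (illustrative): "x = foo ();" where foo may throw
	 is rewritten to "tmp = foo (); x = tmp;" so that the EH edge
	 out of the call still sees the old value of x.  */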
2063       if (stmt_could_throw_p (cfun, stmt)
2064 	  && gimple_has_lhs (stmt)
2065 	  && gimple_stmt_may_fallthru (stmt)
2066 	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2067 	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2068 	{
2069 	  tree lhs = gimple_get_lhs (stmt);
2070 	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
2071 	  gimple *s = gimple_build_assign (lhs, tmp);
2072 	  gimple_set_location (s, gimple_location (stmt));
2073 	  gimple_set_block (s, gimple_block (stmt));
2074 	  gimple_set_lhs (stmt, tmp);
2075 	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2076 	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2077 	    DECL_GIMPLE_REG_P (tmp) = 1;
2078 	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2079 	}
2080       /* Look for things that can throw exceptions, and record them.  */
2081       if (state->cur_region && stmt_could_throw_p (cfun, stmt))
2082 	{
2083 	  record_stmt_eh_region (state->cur_region, stmt);
2084 	  note_eh_region_may_contain_throw (state->cur_region);
2085 	}
2086       break;
2087 
2088     case GIMPLE_COND:
2089     case GIMPLE_GOTO:
2090     case GIMPLE_RETURN:
2091       maybe_record_in_goto_queue (state, stmt);
2092       break;
2093 
2094     case GIMPLE_SWITCH:
2095       verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2096       break;
2097 
2098     case GIMPLE_TRY:
2099       {
2100 	gtry *try_stmt = as_a <gtry *> (stmt);
2101 	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2102 	  replace = lower_try_finally (state, try_stmt);
2103 	else
2104 	  {
2105 	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2106 	    if (!x)
2107 	      {
2108 		replace = gimple_try_eval (try_stmt);
2109 		lower_eh_constructs_1 (state, &replace);
2110 	      }
2111 	    else
2112 	      switch (gimple_code (x))
2113 		{
2114 		case GIMPLE_CATCH:
2115 		  replace = lower_catch (state, try_stmt);
2116 		  break;
2117 		case GIMPLE_EH_FILTER:
2118 		  replace = lower_eh_filter (state, try_stmt);
2119 		  break;
2120 		case GIMPLE_EH_MUST_NOT_THROW:
2121 		  replace = lower_eh_must_not_throw (state, try_stmt);
2122 		  break;
2123 		case GIMPLE_EH_ELSE:
2124 		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2125 		  gcc_unreachable ();
2126 		default:
2127 		  replace = lower_cleanup (state, try_stmt);
2128 		  break;
2129 		}
2130 	  }
2131       }
2132 
2133       /* Remove the old stmt and insert the transformed sequence
2134 	 instead. */
2135       gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2136       gsi_remove (gsi, true);
2137 
2138       /* Return: gsi_remove already advanced GSI to the next stmt.  */
2139       return;
2140 
2141     case GIMPLE_EH_ELSE:
2142       /* We should be eliminating this in lower_try_finally et al.  */
2143       gcc_unreachable ();
2144 
2145     default:
2146       /* A type, a decl, or some kind of statement that we're not
2147 	 interested in.  Don't walk them.  */
2148       break;
2149     }
2150 
2151   gsi_next (gsi);
2152 }
2153 
2154 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2155 
2156 static void
lower_eh_constructs_1(struct leh_state * state,gimple_seq * pseq)2157 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2158 {
2159   gimple_stmt_iterator gsi;
2160   for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2161     lower_eh_constructs_2 (state, &gsi);
2162 }
2163 
2164 namespace {
2165 
2166 const pass_data pass_data_lower_eh =
2167 {
2168   GIMPLE_PASS, /* type */
2169   "eh", /* name */
2170   OPTGROUP_NONE, /* optinfo_flags */
2171   TV_TREE_EH, /* tv_id */
2172   PROP_gimple_lcf, /* properties_required */
2173   PROP_gimple_leh, /* properties_provided */
2174   0, /* properties_destroyed */
2175   0, /* todo_flags_start */
2176   0, /* todo_flags_finish */
2177 };
2178 
2179 class pass_lower_eh : public gimple_opt_pass
2180 {
2181 public:
pass_lower_eh(gcc::context * ctxt)2182   pass_lower_eh (gcc::context *ctxt)
2183     : gimple_opt_pass (pass_data_lower_eh, ctxt)
2184   {}
2185 
2186   /* opt_pass methods: */
2187   virtual unsigned int execute (function *);
2188 
2189 }; // class pass_lower_eh
2190 
2191 unsigned int
execute(function * fun)2192 pass_lower_eh::execute (function *fun)
2193 {
2194   struct leh_state null_state;
2195   gimple_seq bodyp;
2196 
2197   bodyp = gimple_body (current_function_decl);
2198   if (bodyp == NULL)
2199     return 0;
2200 
2201   finally_tree = new hash_table<finally_tree_hasher> (31);
2202   eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2203   memset (&null_state, 0, sizeof (null_state));
2204 
2205   collect_finally_tree_1 (bodyp, NULL);
2206   lower_eh_constructs_1 (&null_state, &bodyp);
2207   gimple_set_body (current_function_decl, bodyp);
2208 
2209   /* We assume there's a return statement, or something, at the end of
2210      the function, and thus ploping the EH sequence afterward won't
2211      change anything.  */
2212   gcc_assert (!gimple_seq_may_fallthru (bodyp));
2213   gimple_seq_add_seq (&bodyp, eh_seq);
2214 
2215   /* We assume that since BODYP already existed, adding EH_SEQ to it
2216      didn't change its value, and we don't have to re-set the function.  */
2217   gcc_assert (bodyp == gimple_body (current_function_decl));
2218 
2219   delete finally_tree;
2220   finally_tree = NULL;
2221   BITMAP_FREE (eh_region_may_contain_throw_map);
2222   eh_seq = NULL;
2223 
2224   /* If this function needs a language specific EH personality routine
2225      and the frontend didn't already set one, do so now.  */
2226   if (function_needs_eh_personality (fun) == eh_personality_lang
2227       && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2228     DECL_FUNCTION_PERSONALITY (current_function_decl)
2229       = lang_hooks.eh_personality ();
2230 
2231   return 0;
2232 }
2233 
2234 } // anon namespace
2235 
2236 gimple_opt_pass *
make_pass_lower_eh(gcc::context * ctxt)2237 make_pass_lower_eh (gcc::context *ctxt)
2238 {
2239   return new pass_lower_eh (ctxt);
2240 }
2241 
2242 /* Create the multiple edges from an EH_DISPATCH statement to all of
2243    the possible handlers for its EH region.  Return true if there's
2244    no fallthru edge; false if there is.  */
2245 
2246 bool
make_eh_dispatch_edges(geh_dispatch * stmt)2247 make_eh_dispatch_edges (geh_dispatch *stmt)
2248 {
2249   eh_region r;
2250   eh_catch c;
2251   basic_block src, dst;
2252 
2253   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2254   src = gimple_bb (stmt);
2255 
2256   switch (r->type)
2257     {
2258     case ERT_TRY:
2259       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2260 	{
2261 	  dst = label_to_block (cfun, c->label);
2262 	  make_edge (src, dst, 0);
2263 
2264 	  /* A catch-all handler doesn't have a fallthru.  */
2265 	  if (c->type_list == NULL)
2266 	    return false;
2267 	}
2268       break;
2269 
2270     case ERT_ALLOWED_EXCEPTIONS:
2271       dst = label_to_block (cfun, r->u.allowed.label);
2272       make_edge (src, dst, 0);
2273       break;
2274 
2275     default:
2276       gcc_unreachable ();
2277     }
2278 
2279   return true;
2280 }
2281 
2282 /* Create the single EH edge from STMT to its nearest landing pad,
2283    if there is such a landing pad within the current function.  */
2284 
2285 void
make_eh_edges(gimple * stmt)2286 make_eh_edges (gimple *stmt)
2287 {
2288   basic_block src, dst;
2289   eh_landing_pad lp;
2290   int lp_nr;
2291 
2292   lp_nr = lookup_stmt_eh_lp (stmt);
2293   if (lp_nr <= 0)
2294     return;
2295 
2296   lp = get_eh_landing_pad_from_number (lp_nr);
2297   gcc_assert (lp != NULL);
2298 
2299   src = gimple_bb (stmt);
2300   dst = label_to_block (cfun, lp->post_landing_pad);
2301   make_edge (src, dst, EDGE_EH);
2302 }
2303 
2304 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2305    do not actually perform the final edge redirection.
2306 
2307    CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2308    we intend to change the destination EH region as well; this means
2309    EH_LANDING_PAD_NR must already be set on the destination block label.
2310    If false, we're being called from generic cfg manipulation code and we
2311    should preserve our place within the region tree.  */
2312 
2313 static void
redirect_eh_edge_1(edge edge_in,basic_block new_bb,bool change_region)2314 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2315 {
2316   eh_landing_pad old_lp, new_lp;
2317   basic_block old_bb;
2318   gimple *throw_stmt;
2319   int old_lp_nr, new_lp_nr;
2320   tree old_label, new_label;
2321   edge_iterator ei;
2322   edge e;
2323 
2324   old_bb = edge_in->dest;
2325   old_label = gimple_block_label (old_bb);
2326   old_lp_nr = EH_LANDING_PAD_NR (old_label);
2327   gcc_assert (old_lp_nr > 0);
2328   old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2329 
2330   throw_stmt = last_stmt (edge_in->src);
2331   gcc_checking_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2332 
2333   new_label = gimple_block_label (new_bb);
2334 
2335   /* Look for an existing region that might be using NEW_BB already.  */
2336   new_lp_nr = EH_LANDING_PAD_NR (new_label);
2337   if (new_lp_nr)
2338     {
2339       new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2340       gcc_assert (new_lp);
2341 
2342       /* Unless CHANGE_REGION is true, the new and old landing pad
2343 	 had better be associated with the same EH region.  */
2344       gcc_assert (change_region || new_lp->region == old_lp->region);
2345     }
2346   else
2347     {
2348       new_lp = NULL;
2349       gcc_assert (!change_region);
2350     }
2351 
2352   /* Notice when we redirect the last EH edge away from OLD_BB.  */
2353   FOR_EACH_EDGE (e, ei, old_bb->preds)
2354     if (e != edge_in && (e->flags & EDGE_EH))
2355       break;
2356 
2357   if (new_lp)
2358     {
2359       /* NEW_LP already exists.  If there are still edges into OLD_LP,
2360 	 there's nothing to do with the EH tree.  If there are no more
2361 	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2362 	 If CHANGE_REGION is true, then our caller is expecting to remove
2363 	 the landing pad.  */
2364       if (e == NULL && !change_region)
2365 	remove_eh_landing_pad (old_lp);
2366     }
2367   else
2368     {
2369       /* No correct landing pad exists.  If there are no more edges
2370 	 into OLD_LP, then we can simply re-use the existing landing pad.
2371 	 Otherwise, we have to create a new landing pad.  */
2372       if (e == NULL)
2373 	{
2374 	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2375 	  new_lp = old_lp;
2376 	}
2377       else
2378 	new_lp = gen_eh_landing_pad (old_lp->region);
2379       new_lp->post_landing_pad = new_label;
2380       EH_LANDING_PAD_NR (new_label) = new_lp->index;
2381     }
2382 
2383   /* Maybe move the throwing statement to the new region.  */
2384   if (old_lp != new_lp)
2385     {
2386       remove_stmt_from_eh_lp (throw_stmt);
2387       add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2388     }
2389 }
2390 
2391 /* Redirect EH edge EDGE_IN to NEW_BB.  */
2392 
2393 edge
redirect_eh_edge(edge edge_in,basic_block new_bb)2394 redirect_eh_edge (edge edge_in, basic_block new_bb)
2395 {
2396   redirect_eh_edge_1 (edge_in, new_bb, false);
2397   return ssa_redirect_edge (edge_in, new_bb);
2398 }
2399 
2400 /* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2401    labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2402    The actual edge update will happen in the caller.  */
2403 
2404 void
redirect_eh_dispatch_edge(geh_dispatch * stmt,edge e,basic_block new_bb)2405 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2406 {
2407   tree new_lab = gimple_block_label (new_bb);
2408   bool any_changed = false;
2409   basic_block old_bb;
2410   eh_region r;
2411   eh_catch c;
2412 
2413   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2414   switch (r->type)
2415     {
2416     case ERT_TRY:
2417       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2418 	{
2419 	  old_bb = label_to_block (cfun, c->label);
2420 	  if (old_bb == e->dest)
2421 	    {
2422 	      c->label = new_lab;
2423 	      any_changed = true;
2424 	    }
2425 	}
2426       break;
2427 
2428     case ERT_ALLOWED_EXCEPTIONS:
2429       old_bb = label_to_block (cfun, r->u.allowed.label);
2430       gcc_assert (old_bb == e->dest);
2431       r->u.allowed.label = new_lab;
2432       any_changed = true;
2433       break;
2434 
2435     default:
2436       gcc_unreachable ();
2437     }
2438 
2439   gcc_assert (any_changed);
2440 }
2441 
2442 /* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2443 
2444 bool
operation_could_trap_helper_p(enum tree_code op,bool fp_operation,bool honor_trapv,bool honor_nans,bool honor_snans,tree divisor,bool * handled)2445 operation_could_trap_helper_p (enum tree_code op,
2446 			       bool fp_operation,
2447 			       bool honor_trapv,
2448 			       bool honor_nans,
2449 			       bool honor_snans,
2450 			       tree divisor,
2451 			       bool *handled)
2452 {
2453   *handled = true;
2454   switch (op)
2455     {
2456     case TRUNC_DIV_EXPR:
2457     case CEIL_DIV_EXPR:
2458     case FLOOR_DIV_EXPR:
2459     case ROUND_DIV_EXPR:
2460     case EXACT_DIV_EXPR:
2461     case CEIL_MOD_EXPR:
2462     case FLOOR_MOD_EXPR:
2463     case ROUND_MOD_EXPR:
2464     case TRUNC_MOD_EXPR:
2465       if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2466         return true;
2467       if (TREE_CODE (divisor) == VECTOR_CST)
2468 	{
2469 	  /* Inspired by initializer_each_zero_or_onep.  */
2470 	  unsigned HOST_WIDE_INT nelts = vector_cst_encoded_nelts (divisor);
2471 	  if (VECTOR_CST_STEPPED_P (divisor)
2472 	      && !TYPE_VECTOR_SUBPARTS (TREE_TYPE (divisor))
2473 		    .is_constant (&nelts))
2474 	    return true;
2475 	  for (unsigned int i = 0; i < nelts; ++i)
2476 	    {
2477 	      tree elt = vector_cst_elt (divisor, i);
2478 	      if (integer_zerop (elt))
2479 		return true;
2480 	    }
2481 	}
2482       return false;
2483 
2484     case RDIV_EXPR:
2485       if (fp_operation)
2486 	{
2487 	  if (honor_snans)
2488 	    return true;
2489 	  return flag_trapping_math;
2490 	}
2491       /* Fixed point operations also use RDIV_EXPR.  */
2492       if (!TREE_CONSTANT (divisor) || fixed_zerop (divisor))
2493 	return true;
2494       return false;
2495 
2496     case LT_EXPR:
2497     case LE_EXPR:
2498     case GT_EXPR:
2499     case GE_EXPR:
2500     case LTGT_EXPR:
2501       /* Some floating point comparisons may trap.  */
2502       return honor_nans;
2503 
2504     case EQ_EXPR:
2505     case NE_EXPR:
2506     case UNORDERED_EXPR:
2507     case ORDERED_EXPR:
2508     case UNLT_EXPR:
2509     case UNLE_EXPR:
2510     case UNGT_EXPR:
2511     case UNGE_EXPR:
2512     case UNEQ_EXPR:
2513       return honor_snans;
2514 
2515     case NEGATE_EXPR:
2516     case ABS_EXPR:
2517     case CONJ_EXPR:
2518       /* These operations don't trap with floating point.  */
2519       if (honor_trapv)
2520 	return true;
2521       return false;
2522 
2523     case ABSU_EXPR:
2524       /* ABSU_EXPR never traps.  */
2525       return false;
2526 
2527     case PLUS_EXPR:
2528     case MINUS_EXPR:
2529     case MULT_EXPR:
2530       /* Any floating arithmetic may trap.  */
2531       if (fp_operation && flag_trapping_math)
2532 	return true;
2533       if (honor_trapv)
2534 	return true;
2535       return false;
2536 
2537     case COMPLEX_EXPR:
2538     case CONSTRUCTOR:
2539       /* Constructing an object cannot trap.  */
2540       return false;
2541 
2542     case COND_EXPR:
2543     case VEC_COND_EXPR:
2544       /* Whether *COND_EXPR can trap depends on whether the
2545 	 first argument can trap, so signal it as not handled.
2546 	 Whether lhs is floating or not doesn't matter.  */
2547       *handled = false;
2548       return false;
2549 
2550     default:
2551       /* Any floating arithmetic may trap.  */
2552       if (fp_operation && flag_trapping_math)
2553 	return true;
2554 
2555       *handled = false;
2556       return false;
2557     }
2558 }
2559 
2560 /* Return true if operation OP may trap.  FP_OPERATION is true if OP is applied
2561    on floating-point values.  HONOR_TRAPV is true if OP is applied on integer
2562    type operands that may trap.  If OP is a division operator, DIVISOR contains
2563    the value of the divisor.  */
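
/* E.g. (illustrative): operation_could_trap_p (TRUNC_DIV_EXPR, false,
   false, integer_zero_node) is true, since integer division by a zero
   (or non-constant) divisor may trap, while the same call with a
   nonzero constant divisor is false.  */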
2564 
2565 bool
operation_could_trap_p(enum tree_code op,bool fp_operation,bool honor_trapv,tree divisor)2566 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2567 			tree divisor)
2568 {
2569   bool honor_nans = (fp_operation && flag_trapping_math
2570 		     && !flag_finite_math_only);
2571   bool honor_snans = fp_operation && flag_signaling_nans != 0;
2572   bool handled;
2573 
2574   /* This function cannot tell whether or not COND_EXPR and VEC_COND_EXPR could
2575      trap, because that depends on the respective condition op.  */
2576   gcc_assert (op != COND_EXPR && op != VEC_COND_EXPR);
2577 
2578   if (TREE_CODE_CLASS (op) != tcc_comparison
2579       && TREE_CODE_CLASS (op) != tcc_unary
2580       && TREE_CODE_CLASS (op) != tcc_binary)
2581     return false;
2582 
2583   return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2584 					honor_nans, honor_snans, divisor,
2585 					&handled);
2586 }
2587 
2588 
2589 /* Returns true if it is possible to prove that the index of
2590    an array access REF (an ARRAY_REF expression) falls into the
2591    array bounds.  */
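
/* E.g. (illustrative): for "int a[10];", the access a[5] is provably in
   bounds, while a[i] with a non-constant index is not, and a[12] fails
   the upper-bound check.  */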
2592 
2593 static bool
in_array_bounds_p(tree ref)2594 in_array_bounds_p (tree ref)
2595 {
2596   tree idx = TREE_OPERAND (ref, 1);
2597   tree min, max;
2598 
2599   if (TREE_CODE (idx) != INTEGER_CST)
2600     return false;
2601 
2602   min = array_ref_low_bound (ref);
2603   max = array_ref_up_bound (ref);
2604   if (!min
2605       || !max
2606       || TREE_CODE (min) != INTEGER_CST
2607       || TREE_CODE (max) != INTEGER_CST)
2608     return false;
2609 
2610   if (tree_int_cst_lt (idx, min)
2611       || tree_int_cst_lt (max, idx))
2612     return false;
2613 
2614   return true;
2615 }
2616 
2617 /* Returns true if it is possible to prove that the range of
2618    an array access REF (an ARRAY_RANGE_REF expression) falls
2619    into the array bounds.  */
2620 
2621 static bool
range_in_array_bounds_p(tree ref)2622 range_in_array_bounds_p (tree ref)
2623 {
2624   tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2625   tree range_min, range_max, min, max;
2626 
2627   range_min = TYPE_MIN_VALUE (domain_type);
2628   range_max = TYPE_MAX_VALUE (domain_type);
2629   if (!range_min
2630       || !range_max
2631       || TREE_CODE (range_min) != INTEGER_CST
2632       || TREE_CODE (range_max) != INTEGER_CST)
2633     return false;
2634 
2635   min = array_ref_low_bound (ref);
2636   max = array_ref_up_bound (ref);
2637   if (!min
2638       || !max
2639       || TREE_CODE (min) != INTEGER_CST
2640       || TREE_CODE (max) != INTEGER_CST)
2641     return false;
2642 
2643   if (tree_int_cst_lt (range_min, min)
2644       || tree_int_cst_lt (max, range_max))
2645     return false;
2646 
2647   return true;
2648 }
2649 
2650 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2651    location or floating point arithmetic.  C.f. the rtl version, may_trap_p.
2652    This routine expects only GIMPLE lhs or rhs input.  */
2653 
2654 bool
tree_could_trap_p(tree expr)2655 tree_could_trap_p (tree expr)
2656 {
2657   enum tree_code code;
2658   bool fp_operation = false;
2659   bool honor_trapv = false;
2660   tree t, base, div = NULL_TREE;
2661 
2662   if (!expr)
2663     return false;
2664 
2665   /* In COND_EXPR and VEC_COND_EXPR only the condition may trap, but
2666      they won't appear as operands in GIMPLE form, so this is just for the
2667      GENERIC uses where it needs to recurse on the operands and so
2668      *COND_EXPR itself doesn't trap.  */
2669   if (TREE_CODE (expr) == COND_EXPR || TREE_CODE (expr) == VEC_COND_EXPR)
2670     return false;
2671 
2672   code = TREE_CODE (expr);
2673   t = TREE_TYPE (expr);
2674 
2675   if (t)
2676     {
2677       if (COMPARISON_CLASS_P (expr))
2678 	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2679       else
2680 	fp_operation = FLOAT_TYPE_P (t);
2681       honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2682     }
2683 
2684   if (TREE_CODE_CLASS (code) == tcc_binary)
2685     div = TREE_OPERAND (expr, 1);
2686   if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2687     return true;
2688 
2689  restart:
2690   switch (code)
2691     {
2692     case COMPONENT_REF:
2693     case REALPART_EXPR:
2694     case IMAGPART_EXPR:
2695     case BIT_FIELD_REF:
2696     case VIEW_CONVERT_EXPR:
2697     case WITH_SIZE_EXPR:
2698       expr = TREE_OPERAND (expr, 0);
2699       code = TREE_CODE (expr);
2700       goto restart;
2701 
2702     case ARRAY_RANGE_REF:
2703       base = TREE_OPERAND (expr, 0);
2704       if (tree_could_trap_p (base))
2705 	return true;
2706       if (TREE_THIS_NOTRAP (expr))
2707 	return false;
2708       return !range_in_array_bounds_p (expr);
2709 
2710     case ARRAY_REF:
2711       base = TREE_OPERAND (expr, 0);
2712       if (tree_could_trap_p (base))
2713 	return true;
2714       if (TREE_THIS_NOTRAP (expr))
2715 	return false;
2716       return !in_array_bounds_p (expr);
2717 
2718     case TARGET_MEM_REF:
2719     case MEM_REF:
2720       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2721 	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2722 	return true;
2723       if (TREE_THIS_NOTRAP (expr))
2724 	return false;
2725       /* We cannot prove that the access is in-bounds when we have
2726          variable-index TARGET_MEM_REFs.  */
2727       if (code == TARGET_MEM_REF
2728 	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2729 	return true;
2730       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2731 	{
2732 	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2733 	  poly_offset_int off = mem_ref_offset (expr);
2734 	  if (maybe_lt (off, 0))
2735 	    return true;
2736 	  if (TREE_CODE (base) == STRING_CST)
2737 	    return maybe_le (TREE_STRING_LENGTH (base), off);
2738 	  tree size = DECL_SIZE_UNIT (base);
2739 	  if (size == NULL_TREE
2740 	      || !poly_int_tree_p (size)
2741 	      || maybe_le (wi::to_poly_offset (size), off))
2742 	    return true;
2743 	  /* Now we are sure the first byte of the access is inside
2744 	     the object.  */
2745 	  return false;
2746 	}
2747       return true;
2748 
2749     case INDIRECT_REF:
2750       return !TREE_THIS_NOTRAP (expr);
2751 
2752     case ASM_EXPR:
2753       return TREE_THIS_VOLATILE (expr);
2754 
2755     case CALL_EXPR:
2756       t = get_callee_fndecl (expr);
2757       /* Assume that calls to weak functions may trap.  */
2758       if (!t || !DECL_P (t))
2759 	return true;
2760       if (DECL_WEAK (t))
2761 	return tree_could_trap_p (t);
2762       return false;
2763 
2764     case FUNCTION_DECL:
2765       /* Assume that accesses to weak functions may trap, unless we know
2766 	 they are certainly defined in current TU or in some other
2767 	 LTO partition.  */
2768       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2769 	{
2770 	  cgraph_node *node = cgraph_node::get (expr);
2771 	  if (node)
2772 	    node = node->function_symbol ();
2773 	  return !(node && node->in_other_partition);
2774 	}
2775       return false;
2776 
2777     case VAR_DECL:
2778       /* Assume that accesses to weak vars may trap, unless we know
2779 	 they are certainly defined in current TU or in some other
2780 	 LTO partition.  */
2781       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2782 	{
2783 	  varpool_node *node = varpool_node::get (expr);
2784 	  if (node)
2785 	    node = node->ultimate_alias_target ();
2786 	  return !(node && node->in_other_partition);
2787 	}
2788       return false;
2789 
2790     default:
2791       return false;
2792     }
2793 }
2794 
2795 /* Return non-NULL if there is an integer operation with trapping overflow
2796    we can rewrite into non-trapping.  Called via walk_tree from
2797    rewrite_to_non_trapping_overflow.  */
2798 
2799 static tree
find_trapping_overflow(tree * tp,int * walk_subtrees,void * data)2800 find_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2801 {
2802   if (EXPR_P (*tp)
2803       && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp))
2804       && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp)))
2805     return *tp;
2806   if (IS_TYPE_OR_DECL_P (*tp)
2807       || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL))
2808     *walk_subtrees = 0;
2809   return NULL_TREE;
2810 }
2811 
2812 /* Rewrite selected operations into unsigned arithmetic, so that they
2813    don't trap on overflow.  */
2814 
2815 static tree
replace_trapping_overflow(tree * tp,int * walk_subtrees,void * data)2816 replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2817 {
2818   if (find_trapping_overflow (tp, walk_subtrees, data))
2819     {
2820       tree type = TREE_TYPE (*tp);
2821       tree utype = unsigned_type_for (type);
2822       *walk_subtrees = 0;
2823       int len = TREE_OPERAND_LENGTH (*tp);
2824       for (int i = 0; i < len; ++i)
2825 	walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow,
2826 		   data, (hash_set<tree> *) data);
2827 
2828       if (TREE_CODE (*tp) == ABS_EXPR)
2829 	{
2830 	  TREE_SET_CODE (*tp, ABSU_EXPR);
2831 	  TREE_TYPE (*tp) = utype;
2832 	  *tp = fold_convert (type, *tp);
2833 	}
2834       else
2835 	{
2836 	  TREE_TYPE (*tp) = utype;
2837 	  len = TREE_OPERAND_LENGTH (*tp);
2838 	  for (int i = 0; i < len; ++i)
2839 	    TREE_OPERAND (*tp, i)
2840 	      = fold_convert (utype, TREE_OPERAND (*tp, i));
2841 	  *tp = fold_convert (type, *tp);
2842 	}
2843     }
2844   return NULL_TREE;
2845 }
2846 
2847 /* If any subexpression of EXPR can trap due to -ftrapv, rewrite it
2848    using unsigned arithmetic to avoid traps in it.  */
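
/* For example (an illustrative sketch): with -ftrapv the signed
   expression "a + b" is rewritten as

     (int) ((unsigned int) a + (unsigned int) b)

   which wraps instead of trapping, and ABS_EXPR becomes ABSU_EXPR with
   the result converted back to the signed type.  */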
2849 
2850 tree
rewrite_to_non_trapping_overflow(tree expr)2851 rewrite_to_non_trapping_overflow (tree expr)
2852 {
2853   if (!flag_trapv)
2854     return expr;
2855   hash_set<tree> pset;
2856   if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset))
2857     return expr;
2858   expr = unshare_expr (expr);
2859   pset.empty ();
2860   walk_tree (&expr, replace_trapping_overflow, &pset, &pset);
2861   return expr;
2862 }
2863 
2864 /* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
2865    an assignment) may throw.  */
2866 
2867 static bool
stmt_could_throw_1_p(gassign * stmt)2868 stmt_could_throw_1_p (gassign *stmt)
2869 {
2870   enum tree_code code = gimple_assign_rhs_code (stmt);
2871   bool honor_nans = false;
2872   bool honor_snans = false;
2873   bool fp_operation = false;
2874   bool honor_trapv = false;
2875   tree t;
2876   size_t i;
2877   bool handled, ret;
2878 
2879   if (TREE_CODE_CLASS (code) == tcc_comparison
2880       || TREE_CODE_CLASS (code) == tcc_unary
2881       || TREE_CODE_CLASS (code) == tcc_binary)
2882     {
2883       if (TREE_CODE_CLASS (code) == tcc_comparison)
2884 	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2885       else
2886 	t = gimple_expr_type (stmt);
2887       fp_operation = FLOAT_TYPE_P (t);
2888       if (fp_operation)
2889 	{
2890 	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2891 	  honor_snans = flag_signaling_nans != 0;
2892 	}
2893       else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2894 	honor_trapv = true;
2895     }
2896 
2897   /* First check the LHS.  */
2898   if (tree_could_trap_p (gimple_assign_lhs (stmt)))
2899     return true;
2900 
2901   /* Check if the main expression may trap.  */
2902   ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2903 				       honor_nans, honor_snans,
2904 				       gimple_assign_rhs2 (stmt),
2905 				       &handled);
2906   if (handled)
2907     return ret;
2908 
2909   /* If the expression does not trap, see if any of the individual operands may
2910      trap.  */
2911   for (i = 1; i < gimple_num_ops (stmt); i++)
2912     if (tree_could_trap_p (gimple_op (stmt, i)))
2913       return true;
2914 
2915   return false;
2916 }
2917 
2918 
2919 /* Return true if statement STMT within FUN could throw an exception.  */
2920 
2921 bool
stmt_could_throw_p(function * fun,gimple * stmt)2922 stmt_could_throw_p (function *fun, gimple *stmt)
2923 {
2924   if (!flag_exceptions)
2925     return false;
2926 
2927   /* The only statements that can throw an exception are assignments,
2928      conditionals, calls, resx, and asms.  */
2929   switch (gimple_code (stmt))
2930     {
2931     case GIMPLE_RESX:
2932       return true;
2933 
2934     case GIMPLE_CALL:
2935       return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2936 
2937     case GIMPLE_COND:
2938       {
2939 	if (fun && !fun->can_throw_non_call_exceptions)
2940 	  return false;
2941 	gcond *cond = as_a <gcond *> (stmt);
2942 	tree lhs = gimple_cond_lhs (cond);
2943 	return operation_could_trap_p (gimple_cond_code (cond),
2944 				       FLOAT_TYPE_P (TREE_TYPE (lhs)),
2945 				       false, NULL_TREE);
2946       }
2947 
2948     case GIMPLE_ASSIGN:
2949       if ((fun && !fun->can_throw_non_call_exceptions)
2950 	  || gimple_clobber_p (stmt))
2951         return false;
2952       return stmt_could_throw_1_p (as_a <gassign *> (stmt));
2953 
2954     case GIMPLE_ASM:
2955       if (fun && !fun->can_throw_non_call_exceptions)
2956         return false;
2957       return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2958 
2959     default:
2960       return false;
2961     }
2962 }
2963 
2964 /* Return true if STMT in function FUN must be assumed necessary because of
2965    non-call exceptions.  */
2966 
2967 bool
stmt_unremovable_because_of_non_call_eh_p(function * fun,gimple * stmt)2968 stmt_unremovable_because_of_non_call_eh_p (function *fun, gimple *stmt)
2969 {
2970   return (fun->can_throw_non_call_exceptions
2971 	  && !fun->can_delete_dead_exceptions
2972 	  && stmt_could_throw_p (fun, stmt));
2973 }
2974 
2975 /* Return true if expression T could throw an exception.  */
2976 
2977 bool
tree_could_throw_p(tree t)2978 tree_could_throw_p (tree t)
2979 {
2980   if (!flag_exceptions)
2981     return false;
2982   if (TREE_CODE (t) == MODIFY_EXPR)
2983     {
2984       if (cfun->can_throw_non_call_exceptions
2985           && tree_could_trap_p (TREE_OPERAND (t, 0)))
2986         return true;
2987       t = TREE_OPERAND (t, 1);
2988     }
2989 
2990   if (TREE_CODE (t) == WITH_SIZE_EXPR)
2991     t = TREE_OPERAND (t, 0);
2992   if (TREE_CODE (t) == CALL_EXPR)
2993     return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2994   if (cfun->can_throw_non_call_exceptions)
2995     return tree_could_trap_p (t);
2996   return false;
2997 }
2998 
2999 /* Return true if STMT can throw an exception that is not caught within its
3000    function FUN.  FUN can be NULL but the function is extra conservative
3001    then.  */
3002 
3003 bool
stmt_can_throw_external(function * fun,gimple * stmt)3004 stmt_can_throw_external (function *fun, gimple *stmt)
3005 {
3006   int lp_nr;
3007 
3008   if (!stmt_could_throw_p (fun, stmt))
3009     return false;
3010   if (!fun)
3011     return true;
3012 
3013   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
3014   return lp_nr == 0;
3015 }
3016 
3017 /* Return true if STMT can throw an exception that is caught within its
3018    function FUN.  */
3019 
3020 bool
stmt_can_throw_internal(function * fun,gimple * stmt)3021 stmt_can_throw_internal (function *fun, gimple *stmt)
3022 {
3023   int lp_nr;
3024 
3025   gcc_checking_assert (fun);
3026   if (!stmt_could_throw_p (fun, stmt))
3027     return false;
3028 
3029   lp_nr = lookup_stmt_eh_lp_fn (fun, stmt);
3030   return lp_nr > 0;
3031 }
3032 
3033 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
3034    remove any entry it might have from the EH table.  Return true if
3035    any change was made.  */
3036 
3037 bool
maybe_clean_eh_stmt_fn(struct function * ifun,gimple * stmt)3038 maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
3039 {
3040   if (stmt_could_throw_p (ifun, stmt))
3041     return false;
3042   return remove_stmt_from_eh_lp_fn (ifun, stmt);
3043 }
3044 
3045 /* Likewise, but always use the current function.  */
3046 
3047 bool
maybe_clean_eh_stmt(gimple * stmt)3048 maybe_clean_eh_stmt (gimple *stmt)
3049 {
3050   return maybe_clean_eh_stmt_fn (cfun, stmt);
3051 }
3052 
3053 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
3054    OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
3055    in the table if it should be in there.  Return TRUE if a replacement was
3056    done that my require an EH edge purge.  */
3057 
3058 bool
maybe_clean_or_replace_eh_stmt(gimple * old_stmt,gimple * new_stmt)3059 maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
3060 {
3061   int lp_nr = lookup_stmt_eh_lp (old_stmt);
3062 
3063   if (lp_nr != 0)
3064     {
3065       bool new_stmt_could_throw = stmt_could_throw_p (cfun, new_stmt);
3066 
3067       if (new_stmt == old_stmt && new_stmt_could_throw)
3068 	return false;
3069 
3070       remove_stmt_from_eh_lp (old_stmt);
3071       if (new_stmt_could_throw)
3072 	{
3073 	  add_stmt_to_eh_lp (new_stmt, lp_nr);
3074 	  return false;
3075 	}
3076       else
3077 	return true;
3078     }
3079 
3080   return false;
3081 }
3082 
3083 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
3084    in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
3085    operand is the return value of duplicate_eh_regions.  */
3086 
3087 bool
maybe_duplicate_eh_stmt_fn(struct function * new_fun,gimple * new_stmt,struct function * old_fun,gimple * old_stmt,hash_map<void *,void * > * map,int default_lp_nr)3088 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
3089 			    struct function *old_fun, gimple *old_stmt,
3090 			    hash_map<void *, void *> *map,
3091 			    int default_lp_nr)
3092 {
3093   int old_lp_nr, new_lp_nr;
3094 
3095   if (!stmt_could_throw_p (new_fun, new_stmt))
3096     return false;
3097 
3098   old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
3099   if (old_lp_nr == 0)
3100     {
3101       if (default_lp_nr == 0)
3102 	return false;
3103       new_lp_nr = default_lp_nr;
3104     }
3105   else if (old_lp_nr > 0)
3106     {
3107       eh_landing_pad old_lp, new_lp;
3108 
3109       old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
3110       new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
3111       new_lp_nr = new_lp->index;
3112     }
3113   else
3114     {
3115       eh_region old_r, new_r;
3116 
3117       old_r = (*old_fun->eh->region_array)[-old_lp_nr];
3118       new_r = static_cast<eh_region> (*map->get (old_r));
3119       new_lp_nr = -new_r->index;
3120     }
3121 
3122   add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
3123   return true;
3124 }
3125 
3126 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
3127    and thus no remapping is required.  */
3128 
3129 bool
maybe_duplicate_eh_stmt(gimple * new_stmt,gimple * old_stmt)3130 maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
3131 {
3132   int lp_nr;
3133 
3134   if (!stmt_could_throw_p (cfun, new_stmt))
3135     return false;
3136 
3137   lp_nr = lookup_stmt_eh_lp (old_stmt);
3138   if (lp_nr == 0)
3139     return false;
3140 
3141   add_stmt_to_eh_lp (new_stmt, lp_nr);
3142   return true;
3143 }
3144 
3145 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
3146    GIMPLE_TRY) that are similar enough to be considered the same.  Currently
3147    this only handles handlers consisting of a single call, as that's the
3148    important case for C++: a destructor call for a particular object showing
3149    up in multiple handlers.  */
3150 
3151 static bool
same_handler_p(gimple_seq oneh,gimple_seq twoh)3152 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3153 {
3154   gimple_stmt_iterator gsi;
3155   gimple *ones, *twos;
3156   unsigned int ai;
3157 
3158   gsi = gsi_start (oneh);
3159   if (!gsi_one_before_end_p (gsi))
3160     return false;
3161   ones = gsi_stmt (gsi);
3162 
3163   gsi = gsi_start (twoh);
3164   if (!gsi_one_before_end_p (gsi))
3165     return false;
3166   twos = gsi_stmt (gsi);
3167 
3168   if (!is_gimple_call (ones)
3169       || !is_gimple_call (twos)
3170       || gimple_call_lhs (ones)
3171       || gimple_call_lhs (twos)
3172       || gimple_call_chain (ones)
3173       || gimple_call_chain (twos)
3174       || !gimple_call_same_target_p (ones, twos)
3175       || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3176     return false;
3177 
3178   for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3179     if (!operand_equal_p (gimple_call_arg (ones, ai),
3180                           gimple_call_arg (twos, ai), 0))
3181       return false;
3182 
3183   return true;
3184 }
3185 
3186 /* Optimize
3187     try { A() } finally { try { ~B() } catch { ~A() } }
3188     try { ... } finally { ~A() }
3189    into
3190     try { A() } catch { ~B() }
3191     try { ~B() ... } finally { ~A() }
3192 
3193    This occurs frequently in C++, where A is a local variable and B is a
3194    temporary used in the initializer for A.  */
3195 
3196 static void
optimize_double_finally(gtry * one,gtry * two)3197 optimize_double_finally (gtry *one, gtry *two)
3198 {
3199   gimple *oneh;
3200   gimple_stmt_iterator gsi;
3201   gimple_seq cleanup;
3202 
3203   cleanup = gimple_try_cleanup (one);
3204   gsi = gsi_start (cleanup);
3205   if (!gsi_one_before_end_p (gsi))
3206     return;
3207 
3208   oneh = gsi_stmt (gsi);
3209   if (gimple_code (oneh) != GIMPLE_TRY
3210       || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3211     return;
3212 
3213   if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3214     {
3215       gimple_seq seq = gimple_try_eval (oneh);
3216 
3217       gimple_try_set_cleanup (one, seq);
3218       gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3219       seq = copy_gimple_seq_and_replace_locals (seq);
3220       gimple_seq_add_seq (&seq, gimple_try_eval (two));
3221       gimple_try_set_eval (two, seq);
3222     }
3223 }
3224 
3225 /* Perform EH refactoring optimizations that are simpler to do when code
3226    flow has been lowered but EH structures haven't.  */
3227 
3228 static void
refactor_eh_r(gimple_seq seq)3229 refactor_eh_r (gimple_seq seq)
3230 {
3231   gimple_stmt_iterator gsi;
3232   gimple *one, *two;
3233 
3234   one = NULL;
3235   two = NULL;
3236   gsi = gsi_start (seq);
3237   while (1)
3238     {
3239       one = two;
3240       if (gsi_end_p (gsi))
3241 	two = NULL;
3242       else
3243 	two = gsi_stmt (gsi);
3244       if (one && two)
3245 	if (gtry *try_one = dyn_cast <gtry *> (one))
3246 	  if (gtry *try_two = dyn_cast <gtry *> (two))
3247 	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3248 		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3249 	      optimize_double_finally (try_one, try_two);
3250       if (one)
3251 	switch (gimple_code (one))
3252 	  {
3253 	  case GIMPLE_TRY:
3254 	    refactor_eh_r (gimple_try_eval (one));
3255 	    refactor_eh_r (gimple_try_cleanup (one));
3256 	    break;
3257 	  case GIMPLE_CATCH:
3258 	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3259 	    break;
3260 	  case GIMPLE_EH_FILTER:
3261 	    refactor_eh_r (gimple_eh_filter_failure (one));
3262 	    break;
3263 	  case GIMPLE_EH_ELSE:
3264 	    {
3265 	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
3266 	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3267 	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3268 	    }
3269 	    break;
3270 	  default:
3271 	    break;
3272 	  }
3273       if (two)
3274 	gsi_next (&gsi);
3275       else
3276 	break;
3277     }
3278 }
3279 
3280 namespace {
3281 
3282 const pass_data pass_data_refactor_eh =
3283 {
3284   GIMPLE_PASS, /* type */
3285   "ehopt", /* name */
3286   OPTGROUP_NONE, /* optinfo_flags */
3287   TV_TREE_EH, /* tv_id */
3288   PROP_gimple_lcf, /* properties_required */
3289   0, /* properties_provided */
3290   0, /* properties_destroyed */
3291   0, /* todo_flags_start */
3292   0, /* todo_flags_finish */
3293 };
3294 
3295 class pass_refactor_eh : public gimple_opt_pass
3296 {
3297 public:
pass_refactor_eh(gcc::context * ctxt)3298   pass_refactor_eh (gcc::context *ctxt)
3299     : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3300   {}
3301 
3302   /* opt_pass methods: */
3303   virtual bool gate (function *) { return flag_exceptions != 0; }
3304   virtual unsigned int execute (function *)
3305     {
3306       refactor_eh_r (gimple_body (current_function_decl));
3307       return 0;
3308     }
3309 
3310 }; // class pass_refactor_eh
3311 
3312 } // anon namespace
3313 
3314 gimple_opt_pass *
3315 make_pass_refactor_eh (gcc::context *ctxt)
3316 {
3317   return new pass_refactor_eh (ctxt);
3318 }
3319 
3320 /* At the end of gimple optimization, we can lower RESX.  */
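
/* In sketch form (not literal GIMPLE output), a "resx N" is replaced by
   one of the following, matching the three cases handled below:

     __builtin_trap ();                      no live source region

     __builtin_eh_copy_values (dst, src);    resuming into another region
                                             of this function; fall thru
                                             to after its landing pad

     ptr_1 = __builtin_eh_pointer (src);     exception escapes; hand it to
     _Unwind_Resume (ptr_1);                 the unwinder (ARM EABI C++
                                             uses __cxa_end_cleanup)

   where "dst"/"src" stand for the EH region numbers involved and "ptr_1"
   is a made-up SSA name.  */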
3321 
3322 static bool
3323 lower_resx (basic_block bb, gresx *stmt,
3324 	    hash_map<eh_region, tree> *mnt_map)
3325 {
3326   int lp_nr;
3327   eh_region src_r, dst_r;
3328   gimple_stmt_iterator gsi;
3329   gimple *x;
3330   tree fn, src_nr;
3331   bool ret = false;
3332 
3333   lp_nr = lookup_stmt_eh_lp (stmt);
3334   if (lp_nr != 0)
3335     dst_r = get_eh_region_from_lp_number (lp_nr);
3336   else
3337     dst_r = NULL;
3338 
3339   src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3340   gsi = gsi_last_bb (bb);
3341 
3342   if (src_r == NULL)
3343     {
3344       /* We can wind up with no source region when pass_cleanup_eh shows
3345 	 that there are no entries into an eh region and deletes it, but
3346 	 then the block that contains the resx isn't removed.  This can
3347 	 happen without optimization when the switch statement created by
3348 	 lower_try_finally_switch isn't simplified to remove the eh case.
3349 
3350 	 Resolve this by expanding the resx node to an abort.  */
3351 
3352       fn = builtin_decl_implicit (BUILT_IN_TRAP);
3353       x = gimple_build_call (fn, 0);
3354       gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3355 
3356       while (EDGE_COUNT (bb->succs) > 0)
3357 	remove_edge (EDGE_SUCC (bb, 0));
3358     }
3359   else if (dst_r)
3360     {
3361       /* When we have a destination region, we resolve this by copying
3362 	 the exception pointer (exc_ptr) and filter values into place, and
3363 	 changing the edge to immediately after the landing pad.  */
3364       edge e;
3365 
3366       if (lp_nr < 0)
3367 	{
3368 	  basic_block new_bb;
3369 	  tree lab;
3370 
3371 	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3372 	     the failure decl into a new block, if needed.  */
3373 	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3374 
3375 	  tree *slot = mnt_map->get (dst_r);
3376 	  if (slot == NULL)
3377 	    {
3378 	      gimple_stmt_iterator gsi2;
3379 
3380 	      new_bb = create_empty_bb (bb);
3381 	      new_bb->count = bb->count;
3382 	      add_bb_to_loop (new_bb, bb->loop_father);
3383 	      lab = gimple_block_label (new_bb);
3384 	      gsi2 = gsi_start_bb (new_bb);
3385 
3386 	      fn = dst_r->u.must_not_throw.failure_decl;
3387 	      x = gimple_build_call (fn, 0);
3388 	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3389 	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3390 
3391 	      mnt_map->put (dst_r, lab);
3392 	    }
3393 	  else
3394 	    {
3395 	      lab = *slot;
3396 	      new_bb = label_to_block (cfun, lab);
3397 	    }
3398 
3399 	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3400 	  e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU);
3401 	}
3402       else
3403 	{
3404 	  edge_iterator ei;
3405 	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3406 
3407 	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3408 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3409 	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3410 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3411 
3412 	  /* Update the flags for the outgoing edge.  */
3413 	  e = single_succ_edge (bb);
3414 	  gcc_assert (e->flags & EDGE_EH);
3415 	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3416 	  e->probability = profile_probability::always ();
3417 
3418 	  /* If there are no more EH users of the landing pad, delete it.  */
3419 	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3420 	    if (e->flags & EDGE_EH)
3421 	      break;
3422 	  if (e == NULL)
3423 	    {
3424 	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3425 	      remove_eh_landing_pad (lp);
3426 	    }
3427 	}
3428 
3429       ret = true;
3430     }
3431   else
3432     {
3433       tree var;
3434 
3435       /* When we don't have a destination region, this exception escapes
3436 	 up the call chain.  We resolve this by generating a call to the
3437 	 _Unwind_Resume library function.  */
3438 
3439       /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3440 	 with no arguments for C++.  Check for that.  */
3441       if (src_r->use_cxa_end_cleanup)
3442 	{
3443 	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3444 	  x = gimple_build_call (fn, 0);
3445 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3446 	}
3447       else
3448 	{
3449 	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3450 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3451 	  x = gimple_build_call (fn, 1, src_nr);
3452 	  var = create_tmp_var (ptr_type_node);
3453 	  var = make_ssa_name (var, x);
3454 	  gimple_call_set_lhs (x, var);
3455 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3456 
3457 	  /* When exception handling is delegated to a caller function, we
3458 	     have to guarantee that shadow memory variables living on the stack
3459 	     will be cleaned up before control is given to a parent function.  */
3460 	  if (sanitize_flags_p (SANITIZE_ADDRESS))
3461 	    {
3462 	      tree decl
3463 		= builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
3464 	      gimple *g = gimple_build_call (decl, 0);
3465 	      gimple_set_location (g, gimple_location (stmt));
3466 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3467 	    }
3468 
3469 	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3470 	  x = gimple_build_call (fn, 1, var);
3471 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3472 	}
3473 
3474       gcc_assert (EDGE_COUNT (bb->succs) == 0);
3475     }
3476 
3477   gsi_remove (&gsi, true);
3478 
3479   return ret;
3480 }
3481 
3482 namespace {
3483 
3484 const pass_data pass_data_lower_resx =
3485 {
3486   GIMPLE_PASS, /* type */
3487   "resx", /* name */
3488   OPTGROUP_NONE, /* optinfo_flags */
3489   TV_TREE_EH, /* tv_id */
3490   PROP_gimple_lcf, /* properties_required */
3491   0, /* properties_provided */
3492   0, /* properties_destroyed */
3493   0, /* todo_flags_start */
3494   0, /* todo_flags_finish */
3495 };
3496 
3497 class pass_lower_resx : public gimple_opt_pass
3498 {
3499 public:
3500   pass_lower_resx (gcc::context *ctxt)
3501     : gimple_opt_pass (pass_data_lower_resx, ctxt)
3502   {}
3503 
3504   /* opt_pass methods: */
3505   virtual bool gate (function *) { return flag_exceptions != 0; }
3506   virtual unsigned int execute (function *);
3507 
3508 }; // class pass_lower_resx
3509 
3510 unsigned
3511 pass_lower_resx::execute (function *fun)
3512 {
3513   basic_block bb;
3514   bool dominance_invalidated = false;
3515   bool any_rewritten = false;
3516 
3517   hash_map<eh_region, tree> mnt_map;
3518 
3519   FOR_EACH_BB_FN (bb, fun)
3520     {
3521       gimple *last = last_stmt (bb);
3522       if (last && is_gimple_resx (last))
3523 	{
3524 	  dominance_invalidated |=
3525 	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3526 	  any_rewritten = true;
3527 	}
3528     }
3529 
3530   if (dominance_invalidated)
3531     {
3532       free_dominance_info (CDI_DOMINATORS);
3533       free_dominance_info (CDI_POST_DOMINATORS);
3534     }
3535 
3536   return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3537 }
3538 
3539 } // anon namespace
3540 
3541 gimple_opt_pass *
3542 make_pass_lower_resx (gcc::context *ctxt)
3543 {
3544   return new pass_lower_resx (ctxt);
3545 }
3546 
3547 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3548    external throw.  */
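
/* E.g. (an assumed GIMPLE fragment; variable and block numbers are made
   up):

     <bb 7>:
     a = {v} {CLOBBER};
     b = {v} {CLOBBER};
     resx 1;          [throws externally]

   The clobbers are useless here because the unwinder is about to leave
   the function anyway, so they are simply removed.  */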
3549 
3550 static void
3551 optimize_clobbers (basic_block bb)
3552 {
3553   gimple_stmt_iterator gsi = gsi_last_bb (bb);
3554   bool any_clobbers = false;
3555   bool seen_stack_restore = false;
3556   edge_iterator ei;
3557   edge e;
3558 
3559   /* Only optimize if the bb contains at least one clobber, ends with
3560      a resx (checked by the caller), otherwise contains only debug
3561      stmts, labels, and at most one __builtin_stack_restore call, and
3562      has an incoming EH edge.  */
3563   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3564     {
3565       gimple *stmt = gsi_stmt (gsi);
3566       if (is_gimple_debug (stmt))
3567 	continue;
3568       if (gimple_clobber_p (stmt))
3569 	{
3570 	  any_clobbers = true;
3571 	  continue;
3572 	}
3573       if (!seen_stack_restore
3574 	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3575 	{
3576 	  seen_stack_restore = true;
3577 	  continue;
3578 	}
3579       if (gimple_code (stmt) == GIMPLE_LABEL)
3580 	break;
3581       return;
3582     }
3583   if (!any_clobbers)
3584     return;
3585   FOR_EACH_EDGE (e, ei, bb->preds)
3586     if (e->flags & EDGE_EH)
3587       break;
3588   if (e == NULL)
3589     return;
3590   gsi = gsi_last_bb (bb);
3591   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3592     {
3593       gimple *stmt = gsi_stmt (gsi);
3594       if (!gimple_clobber_p (stmt))
3595 	continue;
3596       unlink_stmt_vdef (stmt);
3597       gsi_remove (&gsi, true);
3598       release_defs (stmt);
3599     }
3600 }
3601 
3602 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3603    internal throw to successor BB.
3604    SUNK, if not NULL, is an array of sequences indexed by basic-block
3605    index to sink to and to pick up sinking opportunities from.
3606    If FOUND_OPPORTUNITY is not NULL then do not perform the optimization
3607    but set *FOUND_OPPORTUNITY to true.  */
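
/* A sketch with hypothetical block numbers: given

     <bb 3>:            [all preds are EH edges]
     a = {v} {CLOBBER};
     resx 1;            [single EH successor <bb 4>]

   the clobber is moved after the labels of <bb 4>.  Virtual operands are
   rewired through any virtual PHI in <bb 4>, or marked for renaming when
   <bb 4> has multiple predecessors and no such PHI.  */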
3608 
3609 static int
3610 sink_clobbers (basic_block bb,
3611 	       gimple_seq *sunk = NULL, bool *found_opportunity = NULL)
3612 {
3613   edge e;
3614   edge_iterator ei;
3615   gimple_stmt_iterator gsi, dgsi;
3616   basic_block succbb;
3617   bool any_clobbers = false;
3618   unsigned todo = 0;
3619 
3620   /* Only optimize if BB has a single EH successor and
3621      all predecessor edges are EH too.  */
3622   if (!single_succ_p (bb)
3623       || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3624     return 0;
3625 
3626   FOR_EACH_EDGE (e, ei, bb->preds)
3627     {
3628       if ((e->flags & EDGE_EH) == 0)
3629 	return 0;
3630     }
3631 
3632   /* And BB contains only CLOBBER stmts before the final
3633      RESX.  */
3634   gsi = gsi_last_bb (bb);
3635   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3636     {
3637       gimple *stmt = gsi_stmt (gsi);
3638       if (is_gimple_debug (stmt))
3639 	continue;
3640       if (gimple_code (stmt) == GIMPLE_LABEL)
3641 	break;
3642       if (!gimple_clobber_p (stmt))
3643 	return 0;
3644       any_clobbers = true;
3645     }
3646   if (!any_clobbers && (!sunk || gimple_seq_empty_p (sunk[bb->index])))
3647     return 0;
3648 
3649   /* If this was a dry run, tell it we found clobbers to sink.  */
3650   if (found_opportunity)
3651     {
3652       *found_opportunity = true;
3653       return 0;
3654     }
3655 
3656   edge succe = single_succ_edge (bb);
3657   succbb = succe->dest;
3658 
3659   /* See if there is a virtual PHI node to take an updated virtual
3660      operand from.  */
3661   gphi *vphi = NULL;
3662   for (gphi_iterator gpi = gsi_start_phis (succbb);
3663        !gsi_end_p (gpi); gsi_next (&gpi))
3664     {
3665       tree res = gimple_phi_result (gpi.phi ());
3666       if (virtual_operand_p (res))
3667 	{
3668 	  vphi = gpi.phi ();
3669 	  break;
3670 	}
3671     }
3672 
3673   gimple *first_sunk = NULL;
3674   gimple *last_sunk = NULL;
3675   if (sunk && !(succbb->flags & BB_VISITED))
3676     dgsi = gsi_start (sunk[succbb->index]);
3677   else
3678     dgsi = gsi_after_labels (succbb);
3679   gsi = gsi_last_bb (bb);
3680   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3681     {
3682       gimple *stmt = gsi_stmt (gsi);
3683       tree lhs;
3684       if (is_gimple_debug (stmt))
3685 	continue;
3686       if (gimple_code (stmt) == GIMPLE_LABEL)
3687 	break;
3688       lhs = gimple_assign_lhs (stmt);
3689       /* Unfortunately we don't have dominance info updated at this
3690 	 point, so checking if
3691 	 dominated_by_p (CDI_DOMINATORS, succbb,
3692 			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0)))
3693 	 would be too costly.  Thus, avoid sinking any clobbers that
3694 	 refer to non-(D) SSA_NAMEs.  */
3695       if (TREE_CODE (lhs) == MEM_REF
3696 	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3697 	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3698 	{
3699 	  unlink_stmt_vdef (stmt);
3700 	  gsi_remove (&gsi, true);
3701 	  release_defs (stmt);
3702 	  continue;
3703 	}
3704 
3705       /* As we do not change stmt order when sinking across a
3706          forwarder edge we can keep virtual operands in place.  */
3707       gsi_remove (&gsi, false);
3708       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3709       if (!first_sunk)
3710 	first_sunk = stmt;
3711       last_sunk = stmt;
3712     }
3713   if (sunk && !gimple_seq_empty_p (sunk[bb->index]))
3714     {
3715       if (!first_sunk)
3716 	first_sunk = gsi_stmt (gsi_last (sunk[bb->index]));
3717       last_sunk = gsi_stmt (gsi_start (sunk[bb->index]));
3718       gsi_insert_seq_before_without_update (&dgsi,
3719 					    sunk[bb->index], GSI_NEW_STMT);
3720       sunk[bb->index] = NULL;
3721     }
3722   if (first_sunk)
3723     {
3724       /* Adjust virtual operands if we sunk across a virtual PHI.  */
3725       if (vphi)
3726 	{
3727 	  imm_use_iterator iter;
3728 	  use_operand_p use_p;
3729 	  gimple *use_stmt;
3730 	  tree phi_def = gimple_phi_result (vphi);
3731 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def)
3732 	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3733               SET_USE (use_p, gimple_vdef (first_sunk));
3734 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def))
3735 	    {
3736 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1;
3737 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0;
3738 	    }
3739 	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe),
3740 		   gimple_vuse (last_sunk));
3741 	  SET_USE (gimple_vuse_op (last_sunk), phi_def);
3742 	}
3743       /* If there isn't a single predecessor but no virtual PHI node
3744          arrange for virtual operands to be renamed.  */
3745       else if (!single_pred_p (succbb)
3746 	       && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME)
3747 	{
3748 	  mark_virtual_operand_for_renaming (gimple_vuse (last_sunk));
3749 	  todo |= TODO_update_ssa_only_virtuals;
3750 	}
3751     }
3752 
3753   return todo;
3754 }
3755 
3756 /* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3757    we have found some duplicate labels and removed some edges.  */
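
/* In sketch form (SSA and label names are made up), an ERT_TRY dispatch
   lowers to

     filter.0 = __builtin_eh_filter (region_nr);
     switch (filter.0) <default: fallthru_or_catch_all,
                        case 1: handler_1, case 2: handler_2, ...>

   while ERT_ALLOWED_EXCEPTIONS lowers to a two-way test

     filter.0 = __builtin_eh_filter (region_nr);
     if (filter.0 == allowed_filter) goto allowed; else goto fallthru;  */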
3758 
3759 static bool
3760 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3761 {
3762   gimple_stmt_iterator gsi;
3763   int region_nr;
3764   eh_region r;
3765   tree filter, fn;
3766   gimple *x;
3767   bool redirected = false;
3768 
3769   region_nr = gimple_eh_dispatch_region (stmt);
3770   r = get_eh_region_from_number (region_nr);
3771 
3772   gsi = gsi_last_bb (src);
3773 
3774   switch (r->type)
3775     {
3776     case ERT_TRY:
3777       {
3778 	auto_vec<tree> labels;
3779 	tree default_label = NULL;
3780 	eh_catch c;
3781 	edge_iterator ei;
3782 	edge e;
3783 	hash_set<tree> seen_values;
3784 
3785 	/* Collect the labels for a switch.  Zero the post_landing_pad
3786 	   field because we'll no longer have anything keeping these labels
3787 	   in existence and the optimizer will be free to merge these
3788 	   blocks at will.  */
3789 	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3790 	  {
3791 	    tree tp_node, flt_node, lab = c->label;
3792 	    bool have_label = false;
3793 
3794 	    c->label = NULL;
3795 	    tp_node = c->type_list;
3796 	    flt_node = c->filter_list;
3797 
3798 	    if (tp_node == NULL)
3799 	      {
3800 	        default_label = lab;
3801 		break;
3802 	      }
3803 	    do
3804 	      {
3805 		/* Filter out duplicate labels that arise when this handler
3806 		   is shadowed by an earlier one.  When no labels are
3807 		   attached to the handler anymore, we remove
3808 		   the corresponding edge and then we delete unreachable
3809 		   blocks at the end of this pass.  */
3810 		if (! seen_values.contains (TREE_VALUE (flt_node)))
3811 		  {
3812 		    tree t = build_case_label (TREE_VALUE (flt_node),
3813 					       NULL, lab);
3814 		    labels.safe_push (t);
3815 		    seen_values.add (TREE_VALUE (flt_node));
3816 		    have_label = true;
3817 		  }
3818 
3819 		tp_node = TREE_CHAIN (tp_node);
3820 		flt_node = TREE_CHAIN (flt_node);
3821 	      }
3822 	    while (tp_node);
3823 	    if (! have_label)
3824 	      {
3825 		remove_edge (find_edge (src, label_to_block (cfun, lab)));
3826 	        redirected = true;
3827 	      }
3828 	  }
3829 
3830 	/* Clean up the edge flags.  */
3831 	FOR_EACH_EDGE (e, ei, src->succs)
3832 	  {
3833 	    if (e->flags & EDGE_FALLTHRU)
3834 	      {
3835 		/* If there was no catch-all, use the fallthru edge.  */
3836 		if (default_label == NULL)
3837 		  default_label = gimple_block_label (e->dest);
3838 		e->flags &= ~EDGE_FALLTHRU;
3839 	      }
3840 	  }
3841 	gcc_assert (default_label != NULL);
3842 
3843 	/* Don't generate a switch if there's only a default case.
3844 	   This is common in the form of try { A; } catch (...) { B; }.  */
3845 	if (!labels.exists ())
3846 	  {
3847 	    e = single_succ_edge (src);
3848 	    e->flags |= EDGE_FALLTHRU;
3849 	  }
3850 	else
3851 	  {
3852 	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3853 	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3854 							 region_nr));
3855 	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3856 	    filter = make_ssa_name (filter, x);
3857 	    gimple_call_set_lhs (x, filter);
3858 	    gimple_set_location (x, gimple_location (stmt));
3859 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3860 
3861 	    /* Turn the default label into a default case.  */
3862 	    default_label = build_case_label (NULL, NULL, default_label);
3863 	    sort_case_labels (labels);
3864 
3865 	    x = gimple_build_switch (filter, default_label, labels);
3866 	    gimple_set_location (x, gimple_location (stmt));
3867 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3868 	  }
3869       }
3870       break;
3871 
3872     case ERT_ALLOWED_EXCEPTIONS:
3873       {
3874 	edge b_e = BRANCH_EDGE (src);
3875 	edge f_e = FALLTHRU_EDGE (src);
3876 
3877 	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3878 	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3879 						     region_nr));
3880 	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3881 	filter = make_ssa_name (filter, x);
3882 	gimple_call_set_lhs (x, filter);
3883 	gimple_set_location (x, gimple_location (stmt));
3884 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3885 
3886 	r->u.allowed.label = NULL;
3887 	x = gimple_build_cond (EQ_EXPR, filter,
3888 			       build_int_cst (TREE_TYPE (filter),
3889 					      r->u.allowed.filter),
3890 			       NULL_TREE, NULL_TREE);
3891 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3892 
3893 	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3894         f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3895       }
3896       break;
3897 
3898     default:
3899       gcc_unreachable ();
3900     }
3901 
3902   /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3903   gsi_remove (&gsi, true);
3904   return redirected;
3905 }
3906 
3907 namespace {
3908 
3909 const pass_data pass_data_lower_eh_dispatch =
3910 {
3911   GIMPLE_PASS, /* type */
3912   "ehdisp", /* name */
3913   OPTGROUP_NONE, /* optinfo_flags */
3914   TV_TREE_EH, /* tv_id */
3915   PROP_gimple_lcf, /* properties_required */
3916   0, /* properties_provided */
3917   0, /* properties_destroyed */
3918   0, /* todo_flags_start */
3919   0, /* todo_flags_finish */
3920 };
3921 
3922 class pass_lower_eh_dispatch : public gimple_opt_pass
3923 {
3924 public:
3925   pass_lower_eh_dispatch (gcc::context *ctxt)
3926     : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3927   {}
3928 
3929   /* opt_pass methods: */
3930   virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3931   virtual unsigned int execute (function *);
3932 
3933 }; // class pass_lower_eh_dispatch
3934 
3935 unsigned
3936 pass_lower_eh_dispatch::execute (function *fun)
3937 {
3938   basic_block bb;
3939   int flags = 0;
3940   bool redirected = false;
3941   bool any_resx_to_process = false;
3942 
3943   assign_filter_values ();
3944 
3945   FOR_EACH_BB_FN (bb, fun)
3946     {
3947       gimple *last = last_stmt (bb);
3948       if (last == NULL)
3949 	continue;
3950       if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3951 	{
3952 	  redirected |= lower_eh_dispatch (bb,
3953 					   as_a <geh_dispatch *> (last));
3954 	  flags |= TODO_update_ssa_only_virtuals;
3955 	}
3956       else if (gimple_code (last) == GIMPLE_RESX)
3957 	{
3958 	  if (stmt_can_throw_external (fun, last))
3959 	    optimize_clobbers (bb);
3960 	  else if (!any_resx_to_process)
3961 	    sink_clobbers (bb, NULL, &any_resx_to_process);
3962 	}
3963       bb->flags &= ~BB_VISITED;
3964     }
3965   if (redirected)
3966     {
3967       free_dominance_info (CDI_DOMINATORS);
3968       delete_unreachable_blocks ();
3969     }
3970 
3971   if (any_resx_to_process)
3972     {
3973       /* Make sure to catch all secondary sinking opportunities by processing
3974 	 blocks in RPO order and after all CFG modifications from lowering
3975 	 and unreachable block removal.  */
3976       int *rpo = XNEWVEC  (int, n_basic_blocks_for_fn (fun));
3977       int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false);
3978       gimple_seq *sunk = XCNEWVEC (gimple_seq, last_basic_block_for_fn (fun));
3979       for (int i = 0; i < rpo_n; ++i)
3980 	{
3981 	  bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]);
3982 	  gimple *last = last_stmt (bb);
3983 	  if (last
3984 	      && gimple_code (last) == GIMPLE_RESX
3985 	      && !stmt_can_throw_external (fun, last))
3986 	    flags |= sink_clobbers (bb, sunk);
3987 	  /* If there were any clobbers sunk into this BB, insert them now.  */
3988 	  if (!gimple_seq_empty_p (sunk[bb->index]))
3989 	    {
3990 	      gimple_stmt_iterator gsi = gsi_after_labels (bb);
3991 	      gsi_insert_seq_before (&gsi, sunk[bb->index], GSI_NEW_STMT);
3992 	      sunk[bb->index] = NULL;
3993 	    }
3994 	  bb->flags |= BB_VISITED;
3995 	}
3996       free (rpo);
3997       free (sunk);
3998     }
3999 
4000   return flags;
4001 }
4002 
4003 } // anon namespace
4004 
4005 gimple_opt_pass *
4006 make_pass_lower_eh_dispatch (gcc::context *ctxt)
4007 {
4008   return new pass_lower_eh_dispatch (ctxt);
4009 }
4010 
4011 /* Walk statements, see what regions and, optionally, landing pads
4012    are really referenced.
4013 
4014    Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
4015    and in LP_REACHABLE an sbitmap with bits set for reachable landing pads.
4016 
4017    Passing NULL for LP_REACHABLE is valid, in this case only reachable
4018    regions are marked.
4019 
4020    The caller is responsible for freeing the returned sbitmaps.  */
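
/* Typical usage, as in remove_unreachable_handlers below:

     sbitmap r_reachable, lp_reachable;
     mark_reachable_handlers (&r_reachable, &lp_reachable);
     ... test bitmap_bit_p (r_reachable, region->index) ...
     sbitmap_free (r_reachable);
     sbitmap_free (lp_reachable);  */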
4021 
4022 static void
4023 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
4024 {
4025   sbitmap r_reachable, lp_reachable;
4026   basic_block bb;
4027   bool mark_landing_pads = (lp_reachablep != NULL);
4028   gcc_checking_assert (r_reachablep != NULL);
4029 
4030   r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
4031   bitmap_clear (r_reachable);
4032   *r_reachablep = r_reachable;
4033 
4034   if (mark_landing_pads)
4035     {
4036       lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
4037       bitmap_clear (lp_reachable);
4038       *lp_reachablep = lp_reachable;
4039     }
4040   else
4041     lp_reachable = NULL;
4042 
4043   FOR_EACH_BB_FN (bb, cfun)
4044     {
4045       gimple_stmt_iterator gsi;
4046 
4047       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4048 	{
4049 	  gimple *stmt = gsi_stmt (gsi);
4050 
4051 	  if (mark_landing_pads)
4052 	    {
4053 	      int lp_nr = lookup_stmt_eh_lp (stmt);
4054 
4055 	      /* Negative LP numbers are MUST_NOT_THROW regions which
4056 		 are not considered BB enders.  */
4057 	      if (lp_nr < 0)
4058 		bitmap_set_bit (r_reachable, -lp_nr);
4059 
4060 	      /* Positive LP numbers are real landing pads, and BB enders.  */
4061 	      else if (lp_nr > 0)
4062 		{
4063 		  gcc_assert (gsi_one_before_end_p (gsi));
4064 		  eh_region region = get_eh_region_from_lp_number (lp_nr);
4065 		  bitmap_set_bit (r_reachable, region->index);
4066 		  bitmap_set_bit (lp_reachable, lp_nr);
4067 		}
4068 	    }
4069 
4070 	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
4071 	  switch (gimple_code (stmt))
4072 	    {
4073 	    case GIMPLE_RESX:
4074 	      bitmap_set_bit (r_reachable,
4075 			      gimple_resx_region (as_a <gresx *> (stmt)));
4076 	      break;
4077 	    case GIMPLE_EH_DISPATCH:
4078 	      bitmap_set_bit (r_reachable,
4079 			      gimple_eh_dispatch_region (
4080                                 as_a <geh_dispatch *> (stmt)));
4081 	      break;
4082 	    case GIMPLE_CALL:
4083 	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
4084 		for (int i = 0; i < 2; ++i)
4085 		  {
4086 		    tree rt = gimple_call_arg (stmt, i);
4087 		    HOST_WIDE_INT ri = tree_to_shwi (rt);
4088 
4089 		    gcc_assert (ri == (int)ri);
4090 		    bitmap_set_bit (r_reachable, ri);
4091 		  }
4092 	      break;
4093 	    default:
4094 	      break;
4095 	    }
4096 	}
4097     }
4098 }
4099 
4100 /* Remove unreachable handlers and unreachable landing pads.  */
4101 
4102 static void
4103 remove_unreachable_handlers (void)
4104 {
4105   sbitmap r_reachable, lp_reachable;
4106   eh_region region;
4107   eh_landing_pad lp;
4108   unsigned i;
4109 
4110   mark_reachable_handlers (&r_reachable, &lp_reachable);
4111 
4112   if (dump_file)
4113     {
4114       fprintf (dump_file, "Before removal of unreachable regions:\n");
4115       dump_eh_tree (dump_file, cfun);
4116       fprintf (dump_file, "Reachable regions: ");
4117       dump_bitmap_file (dump_file, r_reachable);
4118       fprintf (dump_file, "Reachable landing pads: ");
4119       dump_bitmap_file (dump_file, lp_reachable);
4120     }
4121 
4122   if (dump_file)
4123     {
4124       FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4125 	if (region && !bitmap_bit_p (r_reachable, region->index))
4126 	  fprintf (dump_file,
4127 		   "Removing unreachable region %d\n",
4128 		   region->index);
4129     }
4130 
4131   remove_unreachable_eh_regions (r_reachable);
4132 
4133   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4134     if (lp && !bitmap_bit_p (lp_reachable, lp->index))
4135       {
4136 	if (dump_file)
4137 	  fprintf (dump_file,
4138 		   "Removing unreachable landing pad %d\n",
4139 		   lp->index);
4140 	remove_eh_landing_pad (lp);
4141       }
4142 
4143   if (dump_file)
4144     {
4145       fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
4146       dump_eh_tree (dump_file, cfun);
4147       fprintf (dump_file, "\n\n");
4148     }
4149 
4150   sbitmap_free (r_reachable);
4151   sbitmap_free (lp_reachable);
4152 
4153   if (flag_checking)
4154     verify_eh_tree (cfun);
4155 }
4156 
4157 /* Remove unreachable handlers if any landing pads have been removed after
4158    last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
4159 
4160 void
4161 maybe_remove_unreachable_handlers (void)
4162 {
4163   eh_landing_pad lp;
4164   unsigned i;
4165 
4166   if (cfun->eh == NULL)
4167     return;
4168 
4169   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4170     if (lp
4171 	&& (lp->post_landing_pad == NULL_TREE
4172 	    || label_to_block (cfun, lp->post_landing_pad) == NULL))
4173       {
4174 	remove_unreachable_handlers ();
4175 	return;
4176       }
4177 }
4178 
4179 /* Remove regions that do not have landing pads.  This assumes
4180    that remove_unreachable_handlers has already been run, and
4181    that we've just manipulated the landing pads since then.
4182 
4183    Preserve regions with landing pads and regions that prevent
4184    exceptions from propagating further, even if these regions
4185    are not reachable.  */
4186 
4187 static void
4188 remove_unreachable_handlers_no_lp (void)
4189 {
4190   eh_region region;
4191   sbitmap r_reachable;
4192   unsigned i;
4193 
4194   mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
4195 
4196   FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4197     {
4198       if (! region)
4199 	continue;
4200 
4201       if (region->landing_pads != NULL
4202 	  || region->type == ERT_MUST_NOT_THROW)
4203 	bitmap_set_bit (r_reachable, region->index);
4204 
4205       if (dump_file
4206 	  && !bitmap_bit_p (r_reachable, region->index))
4207 	fprintf (dump_file,
4208 		 "Removing unreachable region %d\n",
4209 		 region->index);
4210     }
4211 
4212   remove_unreachable_eh_regions (r_reachable);
4213 
4214   sbitmap_free (r_reachable);
4215 }
4216 
4217 /* Undo critical edge splitting on an EH landing pad.  Earlier, we
4218    optimistically split all sorts of edges, including EH edges.  The
4219    optimization passes in between may not have needed them; if not,
4220    we should undo the split.
4221 
4222    Recognize this case by having one EH edge incoming to the BB and
4223    one normal edge outgoing; BB should be empty apart from the
4224    post_landing_pad label.
4225 
4226    Note that this is slightly different from the empty handler case
4227    handled by cleanup_empty_eh, in that the actual handler may yet
4228    have actual code but the landing pad has been separated from the
4229    handler.  As such, cleanup_empty_eh relies on this transformation
4230    having been done first.  */
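
/* Schematically, the transformation undoes

     throwing stmt --EH--> <empty post-landing-pad BB> --fallthru--> handler

   by redirecting the EH edge straight to the handler block:

     throwing stmt --EH--> handler

   leaving the forwarder block unreachable.  */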
4231 
4232 static bool
4233 unsplit_eh (eh_landing_pad lp)
4234 {
4235   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4236   gimple_stmt_iterator gsi;
4237   edge e_in, e_out;
4238 
4239   /* Quickly check the edge counts on BB for singularity.  */
4240   if (!single_pred_p (bb) || !single_succ_p (bb))
4241     return false;
4242   e_in = single_pred_edge (bb);
4243   e_out = single_succ_edge (bb);
4244 
4245   /* Input edge must be EH and output edge must be normal.  */
4246   if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4247     return false;
4248 
4249   /* The block must be empty except for the labels and debug insns.  */
4250   gsi = gsi_after_labels (bb);
4251   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4252     gsi_next_nondebug (&gsi);
4253   if (!gsi_end_p (gsi))
4254     return false;
4255 
4256   /* The destination block must not already have a landing pad
4257      for a different region.  */
4258   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4259     {
4260       glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4261       tree lab;
4262       int lp_nr;
4263 
4264       if (!label_stmt)
4265 	break;
4266       lab = gimple_label_label (label_stmt);
4267       lp_nr = EH_LANDING_PAD_NR (lab);
4268       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4269 	return false;
4270     }
4271 
4272   /* The new destination block must not already be a destination of
4273      the source block, lest we merge fallthru and eh edges and get
4274      all sorts of confused.  */
4275   if (find_edge (e_in->src, e_out->dest))
4276     return false;
4277 
4278   /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4279      thought this should have been cleaned up by a phicprop pass, but
4280      that doesn't appear to handle virtuals.  Propagate by hand.  */
4281   if (!gimple_seq_empty_p (phi_nodes (bb)))
4282     {
4283       for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4284 	{
4285 	  gimple *use_stmt;
4286 	  gphi *phi = gpi.phi ();
4287 	  tree lhs = gimple_phi_result (phi);
4288 	  tree rhs = gimple_phi_arg_def (phi, 0);
4289 	  use_operand_p use_p;
4290 	  imm_use_iterator iter;
4291 
4292 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4293 	    {
4294 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4295 		SET_USE (use_p, rhs);
4296 	    }
4297 
4298 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4299 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4300 
4301 	  remove_phi_node (&gpi, true);
4302 	}
4303     }
4304 
4305   if (dump_file && (dump_flags & TDF_DETAILS))
4306     fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4307 	     lp->index, e_out->dest->index);
4308 
4309   /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4310      a successor edge, humor it.  But do the real CFG change with the
4311      predecessor of E_OUT in order to preserve the ordering of arguments
4312      to the PHI nodes in E_OUT->DEST.  */
4313   redirect_eh_edge_1 (e_in, e_out->dest, false);
4314   redirect_edge_pred (e_out, e_in->src);
4315   e_out->flags = e_in->flags;
4316   e_out->probability = e_in->probability;
4317   remove_edge (e_in);
4318 
4319   return true;
4320 }
4321 
4322 /* Examine each landing pad block and see if it matches unsplit_eh.  */
4323 
4324 static bool
4325 unsplit_all_eh (void)
4326 {
4327   bool changed = false;
4328   eh_landing_pad lp;
4329   int i;
4330 
4331   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4332     if (lp)
4333       changed |= unsplit_eh (lp);
4334 
4335   return changed;
4336 }
4337 
4338 /* Wrapper around unsplit_all_eh that makes it usable everywhere.  */
4339 
4340 void
4341 unsplit_eh_edges (void)
4342 {
4343   bool changed;
4344 
4345   /* unsplit_all_eh can die looking up unreachable landing pads.  */
4346   maybe_remove_unreachable_handlers ();
4347 
4348   changed = unsplit_all_eh ();
4349 
4350   /* If EH edges have been unsplit, delete unreachable forwarder blocks.  */
4351   if (changed)
4352     {
4353       free_dominance_info (CDI_DOMINATORS);
4354       free_dominance_info (CDI_POST_DOMINATORS);
4355       delete_unreachable_blocks ();
4356     }
4357 }
4358 
4359 /* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4360    to OLD_BB to NEW_BB; return true on success, false on failure.
4361 
4362    OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4363    PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4364    Virtual PHIs may be deleted and marked for renaming.  */
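
/* The PHI arguments to carry over are first staged on each edge with
   redirect_edge_var_map_add; after redirect_eh_edge_1/redirect_edge_succ,
   flush_pending_stmts materializes them as PHI arguments in NEW_BB.  On
   failure the staged maps are discarded and nothing is changed.  */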
4365 
4366 static bool
4367 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4368 			     edge old_bb_out, bool change_region)
4369 {
4370   gphi_iterator ngsi, ogsi;
4371   edge_iterator ei;
4372   edge e;
4373   bitmap ophi_handled;
4374 
4375   /* The destination block must not be a regular successor for any
4376      of the preds of the landing pad.  Thus, avoid turning
4377         <..>
4378 	 |  \ EH
4379 	 |  <..>
4380 	 |  /
4381 	<..>
4382      into
4383         <..>
4384 	|  | EH
4385 	<..>
4386      which CFG verification would choke on.  See PR45172 and PR51089.  */
4387   if (!single_pred_p (new_bb))
4388     FOR_EACH_EDGE (e, ei, old_bb->preds)
4389       if (find_edge (e->src, new_bb))
4390 	return false;
4391 
4392   FOR_EACH_EDGE (e, ei, old_bb->preds)
4393     redirect_edge_var_map_clear (e);
4394 
4395   ophi_handled = BITMAP_ALLOC (NULL);
4396 
4397   /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4398      for the edges we're going to move.  */
4399   for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4400     {
4401       gphi *ophi, *nphi = ngsi.phi ();
4402       tree nresult, nop;
4403 
4404       nresult = gimple_phi_result (nphi);
4405       nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4406 
4407       /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4408 	 the source ssa_name.  */
4409       ophi = NULL;
4410       for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4411 	{
4412 	  ophi = ogsi.phi ();
4413 	  if (gimple_phi_result (ophi) == nop)
4414 	    break;
4415 	  ophi = NULL;
4416 	}
4417 
4418       /* If we did find the corresponding PHI, copy those inputs.  */
4419       if (ophi)
4420 	{
4421 	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4422 	  if (!has_single_use (nop))
4423 	    {
4424 	      imm_use_iterator imm_iter;
4425 	      use_operand_p use_p;
4426 
4427 	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4428 		{
4429 		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4430 		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4431 			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4432 		    goto fail;
4433 		}
4434 	    }
4435 	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4436 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4437 	    {
4438 	      location_t oloc;
4439 	      tree oop;
4440 
4441 	      if ((e->flags & EDGE_EH) == 0)
4442 		continue;
4443 	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4444 	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4445 	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4446 	    }
4447 	}
4448       /* If we didn't find the PHI, if it's a real variable or a VOP, we know
4449 	 from the fact that OLD_BB is tree_empty_eh_handler_p that the
4450 	 variable is unchanged from input to the block and we can simply
4451 	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4452       else
4453 	{
4454 	  location_t nloc
4455 	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4456 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4457 	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4458 	}
4459     }
4460 
4461   /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4462      we don't know what values from the other edges into NEW_BB to use.  */
4463   for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4464     {
4465       gphi *ophi = ogsi.phi ();
4466       tree oresult = gimple_phi_result (ophi);
4467       if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4468 	goto fail;
4469     }
4470 
4471   /* Finally, move the edges and update the PHIs.  */
4472   for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4473     if (e->flags & EDGE_EH)
4474       {
4475 	/* ???  CFG manipulation routines do not try to update loop
4476 	   form on edge redirection.  Do so manually here for now.  */
4477 	/* Redirecting a loop entry or latch edge will either create a
4478 	   multiple-entry loop or rotate the loop.  If the loops merge,
4479 	   we may end up with a loop with multiple latches.
4480 	   None of this is easily fixed, so cancel the affected loop
4481 	   and mark the other loop as possibly having multiple latches.  */
4482 	if (e->dest == e->dest->loop_father->header)
4483 	  {
4484 	    mark_loop_for_removal (e->dest->loop_father);
4485 	    new_bb->loop_father->latch = NULL;
4486 	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4487 	  }
4488 	redirect_eh_edge_1 (e, new_bb, change_region);
4489 	redirect_edge_succ (e, new_bb);
4490 	flush_pending_stmts (e);
4491       }
4492     else
4493       ei_next (&ei);
4494 
4495   BITMAP_FREE (ophi_handled);
4496   return true;
4497 
4498  fail:
4499   FOR_EACH_EDGE (e, ei, old_bb->preds)
4500     redirect_edge_var_map_clear (e);
4501   BITMAP_FREE (ophi_handled);
4502   return false;
4503 }
4504 
4505 /* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
4506    old region to NEW_REGION at BB.  */
4507 
4508 static void
4509 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4510 			  eh_landing_pad lp, eh_region new_region)
4511 {
4512   gimple_stmt_iterator gsi;
4513   eh_landing_pad *pp;
4514 
4515   for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4516     continue;
4517   *pp = lp->next_lp;
4518 
4519   lp->region = new_region;
4520   lp->next_lp = new_region->landing_pads;
4521   new_region->landing_pads = lp;
4522 
4523   /* Delete the RESX that was matched within the empty handler block.  */
4524   gsi = gsi_last_bb (bb);
4525   unlink_stmt_vdef (gsi_stmt (gsi));
4526   gsi_remove (&gsi, true);
4527 
4528   /* Clean up E_OUT for the fallthru.  */
4529   e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4530   e_out->probability = profile_probability::always ();
4531 }
4532 
4533 /* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4534    unsplitting than unsplit_eh was prepared to handle, e.g. when
4535    multiple incoming edges and phis are involved.  */
4536 
4537 static bool
4538 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4539 {
4540   gimple_stmt_iterator gsi;
4541   tree lab;
4542 
4543   /* We really ought not have totally lost everything following
4544      a landing pad label.  Given that BB is empty, there had better
4545      be a successor.  */
4546   gcc_assert (e_out != NULL);
4547 
4548   /* The destination block must not already have a landing pad
4549      for a different region.  */
4550   lab = NULL;
4551   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4552     {
4553       glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4554       int lp_nr;
4555 
4556       if (!stmt)
4557 	break;
4558       lab = gimple_label_label (stmt);
4559       lp_nr = EH_LANDING_PAD_NR (lab);
4560       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4561 	return false;
4562     }
4563 
4564   /* Attempt to move the PHIs into the successor block.  */
4565   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4566     {
4567       if (dump_file && (dump_flags & TDF_DETAILS))
4568 	fprintf (dump_file,
4569 		 "Unsplit EH landing pad %d to block %i "
4570 		 "(via cleanup_empty_eh).\n",
4571 		 lp->index, e_out->dest->index);
4572       return true;
4573     }
4574 
4575   return false;
4576 }
4577 
4578 /* Return true if edge E_FIRST is part of an empty infinite loop
4579    or leads to such a loop through a series of single successor
4580    empty bbs.  */
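
/* In CFG terms (hypothetical block numbers), both the direct self-loop

     <bb 5> -> <bb 5>

   and a chain of statement-less single-successor blocks that closes on
   itself

     <bb 5> -> <bb 6> -> <bb 5>

   satisfy this predicate.  */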
4581 
4582 static bool
4583 infinite_empty_loop_p (edge e_first)
4584 {
4585   bool inf_loop = false;
4586   edge e;
4587 
4588   if (e_first->dest == e_first->src)
4589     return true;
4590 
4591   e_first->src->aux = (void *) 1;
4592   for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4593     {
4594       gimple_stmt_iterator gsi;
4595       if (e->dest->aux)
4596 	{
4597 	  inf_loop = true;
4598 	  break;
4599 	}
4600       e->dest->aux = (void *) 1;
4601       gsi = gsi_after_labels (e->dest);
4602       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4603 	gsi_next_nondebug (&gsi);
4604       if (!gsi_end_p (gsi))
4605 	break;
4606     }
4607   e_first->src->aux = NULL;
4608   for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4609     e->dest->aux = NULL;
4610 
4611   return inf_loop;
4612 }
4613 
4614 /* Examine the block associated with LP to determine if it's an empty
4615    handler for its EH region.  If so, attempt to redirect EH edges to
4616    an outer region.  Return true if the CFG was updated in any way.  This
4617    is similar to jump forwarding, just across EH edges.  */
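
/* Summary of the cases handled below, in the order they are tried:

     1. BB is empty apart from labels: attempt the more general unsplit
        via cleanup_empty_eh_unsplit.
     2. The RESX vectors nowhere within this function: remove the EH
        edges and let the block go unreachable.
     3. The RESX vectors to a MUST_NOT_THROW region: re-associate the
        throwing statements with that region and remove the edges.
     4. Otherwise: merge the PHIs into the outer landing pad block, or,
        if all predecessors are EH edges, move the landing pad itself to
        the outer region.  */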
4618 
4619 static bool
4620 cleanup_empty_eh (eh_landing_pad lp)
4621 {
4622   basic_block bb = label_to_block (cfun, lp->post_landing_pad);
4623   gimple_stmt_iterator gsi;
4624   gimple *resx;
4625   eh_region new_region;
4626   edge_iterator ei;
4627   edge e, e_out;
4628   bool has_non_eh_pred;
4629   bool ret = false;
4630   int new_lp_nr;
4631 
4632   /* There can be zero or one edges out of BB.  This is the quickest test.  */
4633   switch (EDGE_COUNT (bb->succs))
4634     {
4635     case 0:
4636       e_out = NULL;
4637       break;
4638     case 1:
4639       e_out = single_succ_edge (bb);
4640       break;
4641     default:
4642       return false;
4643     }
4644 
4645   gsi = gsi_last_nondebug_bb (bb);
4646   resx = gsi_stmt (gsi);
4647   if (resx && is_gimple_resx (resx))
4648     {
4649       if (stmt_can_throw_external (cfun, resx))
4650 	optimize_clobbers (bb);
4651       else if (sink_clobbers (bb))
4652 	ret = true;
4653     }
4654 
4655   gsi = gsi_after_labels (bb);
4656 
4657   /* Make sure to skip debug statements.  */
4658   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4659     gsi_next_nondebug (&gsi);
4660 
4661   /* If the block is totally empty, look for more unsplitting cases.  */
4662   if (gsi_end_p (gsi))
4663     {
4664       /* Bail out for the degenerate case of an infinite loop.  Also bail
4665 	 out if bb has no successors and is totally empty, which can happen
4666 	 e.g. because of an incorrect noreturn attribute.  */
4667       if (e_out == NULL
4668 	  || infinite_empty_loop_p (e_out))
4669 	return ret;
4670 
4671       return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4672     }
4673 
4674   /* The block should consist only of a single RESX statement, modulo a
4675      preceding call to __builtin_stack_restore if there is no outgoing
4676      edge, since the call can be eliminated in this case.  */
4677   resx = gsi_stmt (gsi);
4678   if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4679     {
4680       gsi_next_nondebug (&gsi);
4681       resx = gsi_stmt (gsi);
4682     }
4683   if (!is_gimple_resx (resx))
4684     return ret;
4685   gcc_assert (gsi_one_nondebug_before_end_p (gsi));
4686 
4687   /* Determine if there are non-EH edges, or resx edges into the handler.  */
4688   has_non_eh_pred = false;
4689   FOR_EACH_EDGE (e, ei, bb->preds)
4690     if (!(e->flags & EDGE_EH))
4691       has_non_eh_pred = true;
4692 
4693   /* Find the handler that's outer of the empty handler by looking at
4694      where the RESX instruction was vectored.  */
4695   new_lp_nr = lookup_stmt_eh_lp (resx);
4696   new_region = get_eh_region_from_lp_number (new_lp_nr);
4697 
4698   /* If there's no destination region within the current function,
4699      redirection is trivial via removing the throwing statements from
4700      the EH region, removing the EH edges, and allowing the block
4701      to go unreachable.  */
4702   if (new_region == NULL)
4703     {
4704       gcc_assert (e_out == NULL);
4705       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4706 	if (e->flags & EDGE_EH)
4707 	  {
4708 	    gimple *stmt = last_stmt (e->src);
4709 	    remove_stmt_from_eh_lp (stmt);
4710 	    remove_edge (e);
4711 	  }
4712 	else
4713 	  ei_next (&ei);
4714       goto succeed;
4715     }
4716 
4717   /* If the destination region is a MUST_NOT_THROW, allow the runtime
4718      to handle the abort and allow the blocks to go unreachable.  */
4719   if (new_region->type == ERT_MUST_NOT_THROW)
4720     {
4721       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4722 	if (e->flags & EDGE_EH)
4723 	  {
4724 	    gimple *stmt = last_stmt (e->src);
4725 	    remove_stmt_from_eh_lp (stmt);
4726 	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4727 	    remove_edge (e);
4728 	  }
4729 	else
4730 	  ei_next (&ei);
4731       goto succeed;
4732     }
4733 
4734   /* Try to redirect the EH edges and merge the PHIs into the destination
4735      landing pad block.  If the merge succeeds, we'll already have redirected
4736      all the EH edges.  The handler itself will go unreachable if there were
4737      no normal edges.  */
4738   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4739     goto succeed;
4740 
4741   /* Finally, if all input edges are EH edges, then we can (potentially)
4742      reduce the number of transfers from the runtime by moving the landing
4743      pad from the original region to the new region.  This is a win when
4744      we remove the last CLEANUP region along a particular exception
4745      propagation path.  Since nothing changes except for the region with
4746      which the landing pad is associated, the PHI nodes do not need to be
4747      adjusted at all.  */
4748   if (!has_non_eh_pred)
4749     {
4750       cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4751       if (dump_file && (dump_flags & TDF_DETAILS))
4752 	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4753 		 lp->index, new_region->index);
4754 
4755       /* ??? The CFG didn't change, but we may have rendered the
4756 	 old EH region unreachable.  Trigger a cleanup there.  */
4757       return true;
4758     }
4759 
4760   return ret;
4761 
4762  succeed:
4763   if (dump_file && (dump_flags & TDF_DETAILS))
4764     fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4765   remove_eh_landing_pad (lp);
4766   return true;
4767 }
4768 
4769 /* Do a post-order traversal of the EH region tree.  Examine each
4770    post_landing_pad block and see if we can eliminate it as empty.  */
4771 
4772 static bool
4773 cleanup_all_empty_eh (void)
4774 {
4775   bool changed = false;
4776   eh_landing_pad lp;
4777   int i;
4778 
4779   /* Ideally we'd walk the region tree and process LPs inner to outer
4780      to avoid quadraticness in EH redirection.  Walking the LP array
4781      in reverse seems to be an approximation of that.  */
4782   for (i = vec_safe_length (cfun->eh->lp_array) - 1; i >= 1; --i)
4783     {
4784       lp = (*cfun->eh->lp_array)[i];
4785       if (lp)
4786 	changed |= cleanup_empty_eh (lp);
4787     }
4788 
4789   return changed;
4790 }
4791 
4792 /* Perform cleanups and lowering of exception handling
4793     1) Cleanup regions whose handlers do nothing are optimized out.
4794     2) MUST_NOT_THROW regions that became dead because of 1) are optimized out.
4795     3) Info about regions containing instructions, and regions reachable
4796        via local EH edges, is collected.
4797     4) The EH tree is pruned of regions that are no longer necessary.
4798 
4799    TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4800 	 Unify those that have the same failure decl and locus.
4801 */
4802 
4803 static unsigned int
4804 execute_cleanup_eh_1 (void)
4805 {
4806   /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4807      looking up unreachable landing pads.  */
4808   remove_unreachable_handlers ();
4809 
4810   /* Watch out for the region tree vanishing due to all unreachable.  */
4811   if (cfun->eh->region_tree)
4812     {
4813       bool changed = false;
4814 
4815       if (optimize)
4816 	changed |= unsplit_all_eh ();
4817       changed |= cleanup_all_empty_eh ();
4818 
4819       if (changed)
4820 	{
4821 	  free_dominance_info (CDI_DOMINATORS);
4822 	  free_dominance_info (CDI_POST_DOMINATORS);
4823 
4824           /* We delayed all basic block deletion, as we may have performed
4825 	     cleanups on EH edges while non-EH edges were still present.  */
4826 	  delete_unreachable_blocks ();
4827 
4828 	  /* We manipulated the landing pads.  Remove any region that no
4829 	     longer has a landing pad.  */
4830 	  remove_unreachable_handlers_no_lp ();
4831 
4832 	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4833 	}
4834     }
4835 
4836   return 0;
4837 }
4838 
4839 namespace {
4840 
4841 const pass_data pass_data_cleanup_eh =
4842 {
4843   GIMPLE_PASS, /* type */
4844   "ehcleanup", /* name */
4845   OPTGROUP_NONE, /* optinfo_flags */
4846   TV_TREE_EH, /* tv_id */
4847   PROP_gimple_lcf, /* properties_required */
4848   0, /* properties_provided */
4849   0, /* properties_destroyed */
4850   0, /* todo_flags_start */
4851   0, /* todo_flags_finish */
4852 };
4853 
4854 class pass_cleanup_eh : public gimple_opt_pass
4855 {
4856 public:
4857   pass_cleanup_eh (gcc::context *ctxt)
4858     : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4859   {}
4860 
4861   /* opt_pass methods: */
4862   opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4863   virtual bool gate (function *fun)
4864     {
4865       return fun->eh != NULL && fun->eh->region_tree != NULL;
4866     }
4867 
4868   virtual unsigned int execute (function *);
4869 
4870 }; // class pass_cleanup_eh
4871 
4872 unsigned int
4873 pass_cleanup_eh::execute (function *fun)
4874 {
4875   int ret = execute_cleanup_eh_1 ();
4876 
4877   /* If the function no longer needs an EH personality routine
4878      clear it.  This exposes cross-language inlining opportunities
4879      and avoids references to a never defined personality routine.  */
4880   if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4881       && function_needs_eh_personality (fun) != eh_personality_lang)
4882     DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4883 
4884   return ret;
4885 }
4886 
4887 } // anon namespace
4888 
4889 gimple_opt_pass *
4890 make_pass_cleanup_eh (gcc::context *ctxt)
4891 {
4892   return new pass_cleanup_eh (ctxt);
4893 }
4894 
4895 /* Disable warnings about missing quoting in GCC diagnostics for
4896    the verification errors.  Their format strings don't follow GCC
4897    diagnostic conventions but are only used for debugging.  */
4898 #if __GNUC__ >= 10
4899 #  pragma GCC diagnostic push
4900 #  pragma GCC diagnostic ignored "-Wformat-diag"
4901 #endif
4902 
4903 /* Verify that BB, which contains STMT as its last statement, has precisely
4904    the edges that make_eh_edges would create.  */
4905 
4906 DEBUG_FUNCTION bool
4907 verify_eh_edges (gimple *stmt)
4908 {
4909   basic_block bb = gimple_bb (stmt);
4910   eh_landing_pad lp = NULL;
4911   int lp_nr;
4912   edge_iterator ei;
4913   edge e, eh_edge;
4914 
4915   lp_nr = lookup_stmt_eh_lp (stmt);
4916   if (lp_nr > 0)
4917     lp = get_eh_landing_pad_from_number (lp_nr);
4918 
4919   eh_edge = NULL;
4920   FOR_EACH_EDGE (e, ei, bb->succs)
4921     {
4922       if (e->flags & EDGE_EH)
4923 	{
4924 	  if (eh_edge)
4925 	    {
4926 	      error ("BB %i has multiple EH edges", bb->index);
4927 	      return true;
4928 	    }
4929 	  else
4930 	    eh_edge = e;
4931 	}
4932     }
4933 
4934   if (lp == NULL)
4935     {
4936       if (eh_edge)
4937 	{
4938 	  error ("BB %i cannot throw but has an EH edge", bb->index);
4939 	  return true;
4940 	}
4941       return false;
4942     }
4943 
4944   if (!stmt_could_throw_p (cfun, stmt))
4945     {
4946       error ("BB %i last statement has incorrectly set lp", bb->index);
4947       return true;
4948     }
4949 
4950   if (eh_edge == NULL)
4951     {
4952       error ("BB %i is missing an EH edge", bb->index);
4953       return true;
4954     }
4955 
4956   if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad))
4957     {
4958       error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4959       return true;
4960     }
4961 
4962   return false;
4963 }
4964 
4965 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4966 
4967 DEBUG_FUNCTION bool
4968 verify_eh_dispatch_edge (geh_dispatch *stmt)
4969 {
4970   eh_region r;
4971   eh_catch c;
4972   basic_block src, dst;
4973   bool want_fallthru = true;
4974   edge_iterator ei;
4975   edge e, fall_edge;
4976 
4977   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4978   src = gimple_bb (stmt);
4979 
4980   FOR_EACH_EDGE (e, ei, src->succs)
4981     gcc_assert (e->aux == NULL);
4982 
4983   switch (r->type)
4984     {
4985     case ERT_TRY:
4986       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4987 	{
4988 	  dst = label_to_block (cfun, c->label);
4989 	  e = find_edge (src, dst);
4990 	  if (e == NULL)
4991 	    {
4992 	      error ("BB %i is missing an edge", src->index);
4993 	      return true;
4994 	    }
4995 	  e->aux = (void *)e;
4996 
4997 	  /* A catch-all handler doesn't have a fallthru.  */
4998 	  if (c->type_list == NULL)
4999 	    {
5000 	      want_fallthru = false;
5001 	      break;
5002 	    }
5003 	}
5004       break;
5005 
5006     case ERT_ALLOWED_EXCEPTIONS:
5007       dst = label_to_block (cfun, r->u.allowed.label);
5008       e = find_edge (src, dst);
5009       if (e == NULL)
5010 	{
5011 	  error ("BB %i is missing an edge", src->index);
5012 	  return true;
5013 	}
5014       e->aux = (void *)e;
5015       break;
5016 
5017     default:
5018       gcc_unreachable ();
5019     }
5020 
5021   fall_edge = NULL;
5022   FOR_EACH_EDGE (e, ei, src->succs)
5023     {
5024       if (e->flags & EDGE_FALLTHRU)
5025 	{
5026 	  if (fall_edge != NULL)
5027 	    {
5028 	      error ("BB %i too many fallthru edges", src->index);
5029 	      return true;
5030 	    }
5031 	  fall_edge = e;
5032 	}
5033       else if (e->aux)
5034 	e->aux = NULL;
5035       else
5036 	{
5037 	  error ("BB %i has incorrect edge", src->index);
5038 	  return true;
5039 	}
5040     }
5041   if ((fall_edge != NULL) ^ want_fallthru)
5042     {
5043       error ("BB %i has incorrect fallthru edge", src->index);
5044       return true;
5045     }
5046 
5047   return false;
5048 }
5049 
5050 #if __GNUC__ >= 10
5051 #  pragma GCC diagnostic pop
5052 #endif
5053