/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "calls.h"
#include "except.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "gimple-low.h"
#include "asan.h"
#include "gimplify.h"

/* In some instances a tree and a gimple need to be stored in the same
   table, i.e. in hash tables.  This union makes that possible.  */
typedef union {tree *tp; tree t; gimple *g;} treemple;
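
/* For illustration only (a sketch, not part of the original sources):
   the same union member can carry either view of an object, depending
   on which kind of key a given table uses:

     treemple key;
     key.t = label_decl;   // keying on a LABEL_DECL (a tree)
     key.g = try_stmt;     // keying on a GIMPLE_TRY (a gimple)  */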

/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

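/* Illustrative usage of the entry points below (a sketch, not part of
   the original sources):

     add_stmt_to_eh_lp (stmt, lp->index);    // STMT may throw to LP
     ...
     int lp_nr = lookup_stmt_eh_lp (stmt);   // recover lp->index later
     remove_stmt_from_eh_lp (stmt);          // e.g. when STMT is deleted  */
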
/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple *t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple *t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
	lp = gen_eh_landing_pad (region);
      else
	gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple *t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, gimple *t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (t);
  return lp_nr ? *lp_nr : 0;
}

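/* For example (an illustrative sketch, not part of the original
   sources), a caller decodes the result as:

     int lp_nr = lookup_stmt_eh_lp_fn (ifun, stmt);
     if (lp_nr > 0)
       ;  // stmt may throw, unwinding to landing pad number lp_nr
     else if (lp_nr < 0)
       ;  // stmt lies in the MUST_NOT_THROW region with index -lp_nr
     else
       ;  // stmt is not recorded and cannot throw  */
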
/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (gimple *t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

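/* For instance (an illustrative sketch): given

     try { L1: ...; goto L2; } finally { ... }

   where L2 is defined outside the construct, the first pass records L1
   (and the GIMPLE_TRY itself) with the enclosing try as parent.  A
   later "goto L1" is thus seen to stay inside the node, while "goto L2"
   finds no parent chain ending at the try and is classified as leaving
   it (see outside_finally_tree).  */
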
struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};

/* Hashtable helpers.  */

struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
{
  static inline hashval_t hash (const finally_tree_node *);
  static inline bool equal (const finally_tree_node *,
			    const finally_tree_node *);
};

inline hashval_t
finally_tree_hasher::hash (const finally_tree_node *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const finally_tree_node *v,
			    const finally_tree_node *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;

static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple *stmt, gtry *region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple *stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
				  as_a <gtry *> (stmt));
	  collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (
				 as_a <gcatch *> (stmt)),
			      region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
	collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
	 interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple *target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
	return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straightforward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple *cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While lowering a top_p, it is usually expanded into multiple statements;
     the following field stores them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple *, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
	if (tf->goto_queue[i].stmt.g == stmt.g)
	  return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
	{
	  bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
						  &tf->goto_queue[i]);
	  gcc_assert (!existed);
	}
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
				gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with GSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
		      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
	{
	  gsi_insert_seq_before (gsi, gimple_seq_copy (seq), GSI_SAME_STMT);
	  gsi_remove (gsi, false);
	  return;
	}
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
				      as_a <gcatch *> (stmt)),
				    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
	geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
	replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
				      tf);
	replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
				      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
		      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
         = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}

/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
			    location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_true_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
	new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
	record_in_goto_queue_label (tf, new_stmt,
				    gimple_cond_false_label (cond_stmt),
				    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
				  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}


#if CHECKING_P
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
			     gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple *x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

	int x;
	int foo (void)
	{
	  x = 0;
	  try {
	    return x;
	  } finally {
	    x++;
	  }
	}

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
		     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Emit an EH_DISPATCH statement into SEQ for REGION.  */

static void
emit_eh_dispatch (gimple_seq *seq, eh_region region)
{
  geh_dispatch *x = gimple_build_eh_dispatch (region->index);
  gimple_seq_add_stmt (seq, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
	break;
      region = region->outer;
      if (region == NULL)
	break;
    }
}

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
	try { body; } catch { stuff; }
   to
	normal_sequence:
	  body;
	  over:
	eh_sequence:
	  landing_pad:
	  stuff;
	  goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple *x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
	over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
			     location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      /* We duplicate __builtin_stack_restore at -O0 in the hope of eliminating
	 it on the EH paths.  When it is not eliminated, make it transparent in
	 the debug info.  */
      if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	gimple_set_location (stmt, UNKNOWN_LOCATION);
      else if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, loc);
	  gimple_set_block (stmt, block);
	}
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple *x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

	try {
	  finally_block;
	} catch {
	  if (fintmp == eh_edge)
	    protect_cleanup_actions;
	}

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
			       struct leh_state *this_state,
			       struct leh_tf_state *tf)
{
  gimple_seq finally = gimple_try_cleanup (tf->top_p);

  /* EH_ELSE doesn't come from user code; only compiler generated stuff.
     It does need to be handled here, so as to separate the (different)
     EH path from the normal path.  But we should not attempt to wrap
     it with a must-not-throw node (which indeed gets in the way).  */
  if (geh_else *eh_else = get_eh_else (finally))
    {
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
      finally = gimple_eh_else_e_body (eh_else);

      /* Let the ELSE see the exception that's being processed.  */
      eh_region save_ehp = this_state->ehp_region;
      this_state->ehp_region = this_state->cur_region;
      lower_eh_constructs_1 (this_state, &finally);
      this_state->ehp_region = save_ehp;
    }
  else
    {
      /* First check for nothing to do.  */
      if (lang_hooks.eh_protect_cleanup_actions == NULL)
	return;
      tree actions = lang_hooks.eh_protect_cleanup_actions ();
      if (actions == NULL)
	return;

      if (this_state)
	finally = lower_try_finally_dup_block (finally, outer_state,
	  gimple_location (tf->try_finally_expr));

      /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
	 set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
	 to be in an enclosing scope, but needs to be implemented at this level
	 to avoid a nesting violation (see wrap_temporary_cleanups in
	 cp/decl.c).  Since it's logically at an outer level, we should call
	 terminate before we get to it, so strip it away before adding the
	 MUST_NOT_THROW filter.  */
      gimple_stmt_iterator gsi = gsi_start (finally);
      gimple *x = gsi_stmt (gsi);
      if (gimple_code (x) == GIMPLE_TRY
	  && gimple_try_kind (x) == GIMPLE_TRY_CATCH
	  && gimple_try_catch_is_cleanup (x))
	{
	  gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
	  gsi_remove (&gsi, false);
	}

      /* Wrap the block with protect_cleanup_actions as the action.  */
      geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
      gtry *try_stmt = gimple_build_try (finally,
					 gimple_seq_alloc_with_stmt (eh_mnt),
					 GIMPLE_TRY_CATCH);
      finally = lower_eh_must_not_throw (outer_state, try_stmt);
    }

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (gimple_seq_may_fallthru (finally))
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
			      struct leh_tf_state *tf)
{
  tree lab;
  gimple *x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	}
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
	{
	  emit_post_landing_pad (&eh_seq, tf->region);

	  x = gimple_build_goto (lab);
	  gimple_set_location (x, gimple_location (tf->try_finally_expr));
	  gimple_seq_add_stmt (&eh_seq, x);
	}
    }
}

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple *x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
	finally = gimple_eh_else_e_body (eh_else);
      else
	finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
	{
	  tree block = gimple_block (stmt);
	  gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
	  gimple_set_block (stmt, block);
	}
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
	 the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
	do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
	{
	  /* Reachable by goto to fallthru label only.  Redirect it
	     to the new label (already created, sadly), and do not
	     emit the final branch out, or the fallthru label.  */
	  tf->fallthru_label = NULL;
	  return;
	}
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */
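
/* An illustrative sketch (not from the original sources): with a
   finally body F, a fallthru exit, and a "goto L" that leaves the node,
   the copying strategy produces roughly

	<try body>
	F;			// copy for the fallthru edge
	goto fallthru_label;
     L_redirected:
	F;			// copy for the goto L edge
	goto L;

   plus, when the region may throw, one more copy on the EH path ending
   in a RESX.  */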

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
	 since it is only emitted once.  */
      if (eh_else)
	seq = gimple_eh_else_e_body (eh_else);
      else
	seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
	struct goto_queue_node *q;
	tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
	{
	  index = q->index < 0 ? return_index : q->index;

	  if (!labels[index].q)
	    labels[index].q = q;
	}

      for (index = 0; index < return_index + 1; index++)
	{
	  tree lab;

	  q = labels[index].q;
	  if (! q)
	    continue;

	  lab = labels[index].label
	    = create_artificial_label (tf_loc);

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);

	  x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

	  seq = lower_try_finally_dup_block (finally, state, q->location);
	  lower_eh_constructs_1 (state, &seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
	  maybe_record_in_goto_queue (state, q->cont_stmt);
	}

      for (q = tf->goto_queue; q < qe; q++)
	{
	  tree lab;

	  index = q->index < 0 ? return_index : q->index;

	  if (labels[index].q == q)
	    continue;

	  lab = labels[index].label;

	  if (index == return_index)
	    do_return_redirection (q, lab, NULL);
	  else
	    do_goto_redirection (q, lab, NULL, tf);
	}

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */
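
/* An illustrative sketch (not from the original sources): for the same
   shape of try block, the switch strategy instruments each edge with an
   assignment to a temporary and emits the finally body only once:

	<try body>
	finally_tmp = 0;	// on the fallthru edge
	goto finally_label;
	...
	finally_tmp = 1;	// replaces the escaping "goto L"
	goto finally_label;
     finally_label:
	F;
	switch (finally_tmp)
	  {
	  case 0: goto fallthru_label;
	  case 1: goto L;
	  }  */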

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  auto_vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  gimple *switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple *> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node,
					      fallthru_index));
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
	{
	  finally = gimple_eh_else_e_body (eh_else);
	  lower_eh_constructs_1 (state, &finally);

	  emit_post_landing_pad (&eh_seq, tf->region);
	  gimple_seq_add_seq (&eh_seq, finally);
	  emit_resx (&eh_seq, tf->region);
	}

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
			       build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
				    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node,
						  return_index));
	  gimple_seq_add_stmt (&mod, x);
	  do_return_redirection (q, finally_label, mod);
	  switch_id = return_index;
	}
      else
	{
	  x = gimple_build_assign (finally_tmp,
				   build_int_cst (integer_type_node, q->index));
	  gimple_seq_add_stmt (&mod, x);
	  do_goto_redirection (q, finally_label, mod, tf);
	  switch_id = q->index;
	}

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
	  tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
				       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
	    cont_map = new hash_map<tree, gimple *>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple *cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
				     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}

/* Decide whether or not we are going to duplicate the finally block.
   There are several considerations.

   First, if this is Java, then the finally block contains code
   written by the user.  It has line numbers associated with it,
   so duplicating the block means it's difficult to set a breakpoint.
   Since controlling code generation via -g is verboten, we simply
   never duplicate code without optimization.

   Second, we'd like to prevent egregious code growth.  One way to
   do this is to estimate the size of the finally block, multiply
   that by the number of copies we'd need to make, and compare against
   the estimate of the size of the switch machinery we'd have to add.  */

static bool
decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally)
{
  int f_estimate, sw_estimate;
  geh_else *eh_else;

  /* If there's an EH_ELSE involved, the exception path is separate
     and really doesn't come into play for this computation.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      ndests -= may_throw;
      finally = gimple_eh_else_n_body (eh_else);
    }

  if (!optimize)
    {
      gimple_stmt_iterator gsi;

      if (ndests == 1)
        return true;

      for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  /* Duplicate __builtin_stack_restore in the hope of eliminating it
	     on the EH paths and, consequently, useless cleanups.  */
	  gimple *stmt = gsi_stmt (gsi);
	  if (!is_gimple_debug (stmt)
	      && !gimple_clobber_p (stmt)
	      && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
	    return false;
	}
      return true;
    }

  /* Finally estimate N times, plus N gotos.  */
  f_estimate = estimate_num_insns_seq (finally, &eni_size_weights);
  f_estimate = (f_estimate + 1) * ndests;

  /* Switch statement (cost 10), N variable assignments, N gotos.  */
  sw_estimate = 10 + 2 * ndests;

  /* Optimize for size clearly wants our best guess.  */
  if (optimize_function_for_size_p (cfun))
    return f_estimate < sw_estimate;

  /* ??? These numbers are completely made up so far.  */
  if (optimize > 1)
    return f_estimate < 100 || f_estimate < sw_estimate * 2;
  else
    return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3;
}

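/* A worked example of the size heuristic above (illustrative numbers
   only): a finally body estimated at 8 insns reaching 3 destinations
   gives f_estimate = (8 + 1) * 3 = 27 and sw_estimate = 10 + 2 * 3 = 16.
   When optimizing for size, 27 < 16 fails, so the switch form is used;
   at -O2, 27 < 100 holds, so the block is duplicated instead.  */
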
1632 /* REG is the enclosing region for a possible cleanup region, or the region
1633    itself.  Returns TRUE if such a region would be unreachable.
1634 
1635    Cleanup regions within a must-not-throw region aren't actually reachable
1636    even if there are throwing stmts within them, because the personality
1637    routine will call terminate before unwinding.  */
1638 
1639 static bool
1640 cleanup_is_dead_in (eh_region reg)
1641 {
1642   while (reg && reg->type == ERT_CLEANUP)
1643     reg = reg->outer;
1644   return (reg && reg->type == ERT_MUST_NOT_THROW);
1645 }
1646 
1647 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY nodes
1648    to a sequence of labels and blocks, plus the exception region trees
1649    that record all the magic.  This is complicated by the need to
1650    arrange for the FINALLY block to be executed on all exits.  */
1651 
1652 static gimple_seq
1653 lower_try_finally (struct leh_state *state, gtry *tp)
1654 {
1655   struct leh_tf_state this_tf;
1656   struct leh_state this_state;
1657   int ndests;
1658   gimple_seq old_eh_seq;
1659 
1660   /* Process the try block.  */
1661 
1662   memset (&this_tf, 0, sizeof (this_tf));
1663   this_tf.try_finally_expr = tp;
1664   this_tf.top_p = tp;
1665   this_tf.outer = state;
1666   if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state->cur_region))
1667     {
1668       this_tf.region = gen_eh_region_cleanup (state->cur_region);
1669       this_state.cur_region = this_tf.region;
1670     }
1671   else
1672     {
1673       this_tf.region = NULL;
1674       this_state.cur_region = state->cur_region;
1675     }
1676 
1677   this_state.ehp_region = state->ehp_region;
1678   this_state.tf = &this_tf;
1679 
1680   old_eh_seq = eh_seq;
1681   eh_seq = NULL;
1682 
1683   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1684 
1685   /* Determine if the try block is escaped through the bottom.  */
1686   this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1687 
1688   /* Determine if any exceptions are possible within the try block.  */
1689   if (this_tf.region)
1690     this_tf.may_throw = eh_region_may_contain_throw (this_tf.region);
1691   if (this_tf.may_throw)
1692     honor_protect_cleanup_actions (state, &this_state, &this_tf);
1693 
1694   /* Determine how many edges (still) reach the finally block.  Or rather,
1695      how many destinations are reached by the finally block.  Use this to
1696      determine how we process the finally block itself.  */
1697 
1698   ndests = this_tf.dest_array.length ();
1699   ndests += this_tf.may_fallthru;
1700   ndests += this_tf.may_return;
1701   ndests += this_tf.may_throw;
1702 
1703   /* If the FINALLY block is not reachable, dike it out.  */
1704   if (ndests == 0)
1705     {
1706       gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (tp));
1707       gimple_try_set_cleanup (tp, NULL);
1708     }
1709   /* If the finally block doesn't fall through, then any destination
1710      we might try to impose there isn't reached either.  There may be
1711      some minor amount of cleanup and redirection still needed.  */
1712   else if (!gimple_seq_may_fallthru (gimple_try_cleanup (tp)))
1713     lower_try_finally_nofallthru (state, &this_tf);
1714 
1715   /* We can easily special-case redirection to a single destination.  */
1716   else if (ndests == 1)
1717     lower_try_finally_onedest (state, &this_tf);
1718   else if (decide_copy_try_finally (ndests, this_tf.may_throw,
1719 				    gimple_try_cleanup (tp)))
1720     lower_try_finally_copy (state, &this_tf);
1721   else
1722     lower_try_finally_switch (state, &this_tf);
1723 
1724   /* If someone requested we add a label at the end of the transformed
1725      block, do so.  */
1726   if (this_tf.fallthru_label)
1727     {
1728       /* This must be reached only if ndests == 0. */
1729       gimple *x = gimple_build_label (this_tf.fallthru_label);
1730       gimple_seq_add_stmt (&this_tf.top_p_seq, x);
1731     }
1732 
1733   this_tf.dest_array.release ();
1734   free (this_tf.goto_queue);
1735   if (this_tf.goto_queue_map)
1736     delete this_tf.goto_queue_map;
1737 
1738   /* If there was an old (aka outer) eh_seq, append the current eh_seq.
1739      If there was no old eh_seq, then the append is trivially already done.  */
1740   if (old_eh_seq)
1741     {
1742       if (eh_seq == NULL)
1743 	eh_seq = old_eh_seq;
1744       else
1745 	{
1746 	  gimple_seq new_eh_seq = eh_seq;
1747 	  eh_seq = old_eh_seq;
1748 	  gimple_seq_add_seq (&eh_seq, new_eh_seq);
1749 	}
1750     }
1751 
1752   return this_tf.top_p_seq;
1753 }
1754 
1755 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
1756    list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
1757    exception region trees that record all the magic.  */
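
/* For instance, roughly: the handlers of

     try { body; } catch (T1) { h1; } catch (T2) { h2; }

   become the region's new cleanup sequence: a per-catch label followed
   by the lowered handler body and a goto to a shared exit label, with
   an eh_dispatch and resx emitted in front to select among them.  */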
1758 
1759 static gimple_seq
1760 lower_catch (struct leh_state *state, gtry *tp)
1761 {
1762   eh_region try_region = NULL;
1763   struct leh_state this_state = *state;
1764   gimple_stmt_iterator gsi;
1765   tree out_label;
1766   gimple_seq new_seq, cleanup;
1767   gimple *x;
1768   location_t try_catch_loc = gimple_location (tp);
1769 
1770   if (flag_exceptions)
1771     {
1772       try_region = gen_eh_region_try (state->cur_region);
1773       this_state.cur_region = try_region;
1774     }
1775 
1776   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1777 
1778   if (!eh_region_may_contain_throw (try_region))
1779     return gimple_try_eval (tp);
1780 
1781   new_seq = NULL;
1782   emit_eh_dispatch (&new_seq, try_region);
1783   emit_resx (&new_seq, try_region);
1784 
1785   this_state.cur_region = state->cur_region;
1786   this_state.ehp_region = try_region;
1787 
1788   /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup
1789      itself, so that e.g. for coverage purposes the nested cleanups don't
1790      appear before the cleanup body.  See PR64634 for details.  */
1791   gimple_seq old_eh_seq = eh_seq;
1792   eh_seq = NULL;
1793 
1794   out_label = NULL;
1795   cleanup = gimple_try_cleanup (tp);
1796   for (gsi = gsi_start (cleanup);
1797        !gsi_end_p (gsi);
1798        gsi_next (&gsi))
1799     {
1800       eh_catch c;
1801       gcatch *catch_stmt;
1802       gimple_seq handler;
1803 
1804       catch_stmt = as_a <gcatch *> (gsi_stmt (gsi));
1805       c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt));
1806 
1807       handler = gimple_catch_handler (catch_stmt);
1808       lower_eh_constructs_1 (&this_state, &handler);
1809 
1810       c->label = create_artificial_label (UNKNOWN_LOCATION);
1811       x = gimple_build_label (c->label);
1812       gimple_seq_add_stmt (&new_seq, x);
1813 
1814       gimple_seq_add_seq (&new_seq, handler);
1815 
1816       if (gimple_seq_may_fallthru (new_seq))
1817 	{
1818 	  if (!out_label)
1819 	    out_label = create_artificial_label (try_catch_loc);
1820 
1821 	  x = gimple_build_goto (out_label);
1822 	  gimple_seq_add_stmt (&new_seq, x);
1823 	}
1824       if (!c->type_list)
1825 	break;
1826     }
1827 
1828   gimple_try_set_cleanup (tp, new_seq);
1829 
1830   gimple_seq new_eh_seq = eh_seq;
1831   eh_seq = old_eh_seq;
1832   gimple_seq ret_seq = frob_into_branch_around (tp, try_region, out_label);
1833   gimple_seq_add_seq (&eh_seq, new_eh_seq);
1834   return ret_seq;
1835 }
1836 
1837 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with a
1838    GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception
1839    region trees that record all the magic.  */
1840 
1841 static gimple_seq
1842 lower_eh_filter (struct leh_state *state, gtry *tp)
1843 {
1844   struct leh_state this_state = *state;
1845   eh_region this_region = NULL;
1846   gimple *inner, *x;
1847   gimple_seq new_seq;
1848 
1849   inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1850 
1851   if (flag_exceptions)
1852     {
1853       this_region = gen_eh_region_allowed (state->cur_region,
1854 				           gimple_eh_filter_types (inner));
1855       this_state.cur_region = this_region;
1856     }
1857 
1858   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1859 
1860   if (!eh_region_may_contain_throw (this_region))
1861     return gimple_try_eval (tp);
1862 
1863   new_seq = NULL;
1864   this_state.cur_region = state->cur_region;
1865   this_state.ehp_region = this_region;
1866 
1867   emit_eh_dispatch (&new_seq, this_region);
1868   emit_resx (&new_seq, this_region);
1869 
1870   this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION);
1871   x = gimple_build_label (this_region->u.allowed.label);
1872   gimple_seq_add_stmt (&new_seq, x);
1873 
1874   lower_eh_constructs_1 (&this_state, gimple_eh_filter_failure_ptr (inner));
1875   gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (inner));
1876 
1877   gimple_try_set_cleanup (tp, new_seq);
1878 
1879   return frob_into_branch_around (tp, this_region, NULL);
1880 }
1881 
1882 /* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
1883    a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
1884    plus the exception region trees that record all the magic.  */
1885 
1886 static gimple_seq
1887 lower_eh_must_not_throw (struct leh_state *state, gtry *tp)
1888 {
1889   struct leh_state this_state = *state;
1890 
1891   if (flag_exceptions)
1892     {
1893       gimple *inner = gimple_seq_first_stmt (gimple_try_cleanup (tp));
1894       eh_region this_region;
1895 
1896       this_region = gen_eh_region_must_not_throw (state->cur_region);
1897       this_region->u.must_not_throw.failure_decl
1898 	= gimple_eh_must_not_throw_fndecl (
1899 	    as_a <geh_mnt *> (inner));
1900       this_region->u.must_not_throw.failure_loc
1901 	= LOCATION_LOCUS (gimple_location (tp));
1902 
1903       /* In order to get mangling applied to this decl, we must mark it
1904 	 used now.  Otherwise, pass_ipa_free_lang_data won't think it
1905 	 needs to happen.  */
1906       TREE_USED (this_region->u.must_not_throw.failure_decl) = 1;
1907 
1908       this_state.cur_region = this_region;
1909     }
1910 
1911   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1912 
1913   return gimple_try_eval (tp);
1914 }
1915 
1916 /* Implement a cleanup expression.  This is similar to try-finally,
1917    except that we only execute the cleanup block for exception edges.  */
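
/* For instance, roughly: in "try { body; } cleanup { C; }" the
   cleanup C runs only when body throws; the normal path branches
   around it, which is why a faked try-finally state suffices for
   honor_protect_cleanup_actions below.  */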
1918 
1919 static gimple_seq
1920 lower_cleanup (struct leh_state *state, gtry *tp)
1921 {
1922   struct leh_state this_state = *state;
1923   eh_region this_region = NULL;
1924   struct leh_tf_state fake_tf;
1925   gimple_seq result;
1926   bool cleanup_dead = cleanup_is_dead_in (state->cur_region);
1927 
1928   if (flag_exceptions && !cleanup_dead)
1929     {
1930       this_region = gen_eh_region_cleanup (state->cur_region);
1931       this_state.cur_region = this_region;
1932     }
1933 
1934   lower_eh_constructs_1 (&this_state, gimple_try_eval_ptr (tp));
1935 
1936   if (cleanup_dead || !eh_region_may_contain_throw (this_region))
1937     return gimple_try_eval (tp);
1938 
1939   /* Build enough of a try-finally state so that we can reuse
1940      honor_protect_cleanup_actions.  */
1941   memset (&fake_tf, 0, sizeof (fake_tf));
1942   fake_tf.top_p = fake_tf.try_finally_expr = tp;
1943   fake_tf.outer = state;
1944   fake_tf.region = this_region;
1945   fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (tp));
1946   fake_tf.may_throw = true;
1947 
1948   honor_protect_cleanup_actions (state, NULL, &fake_tf);
1949 
1950   if (fake_tf.may_throw)
1951     {
1952       /* In this case honor_protect_cleanup_actions had nothing to do,
1953 	 and we should process this normally.  */
1954       lower_eh_constructs_1 (state, gimple_try_cleanup_ptr (tp));
1955       result = frob_into_branch_around (tp, this_region,
1956                                         fake_tf.fallthru_label);
1957     }
1958   else
1959     {
1960       /* In this case honor_protect_cleanup_actions did nearly all of
1961 	 the work.  All we have left is to append the fallthru_label.  */
1962 
1963       result = gimple_try_eval (tp);
1964       if (fake_tf.fallthru_label)
1965 	{
1966 	  gimple *x = gimple_build_label (fake_tf.fallthru_label);
1967 	  gimple_seq_add_stmt (&result, x);
1968 	}
1969     }
1970   return result;
1971 }
1972 
1973 /* Main loop for lowering eh constructs. Also moves gsi to the next
1974    statement. */
1975 
1976 static void
1977 lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi)
1978 {
1979   gimple_seq replace;
1980   gimple *x;
1981   gimple *stmt = gsi_stmt (*gsi);
1982 
1983   switch (gimple_code (stmt))
1984     {
1985     case GIMPLE_CALL:
1986       {
1987 	tree fndecl = gimple_call_fndecl (stmt);
1988 	tree rhs, lhs;
1989 
1990 	if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1991 	  switch (DECL_FUNCTION_CODE (fndecl))
1992 	    {
1993 	    case BUILT_IN_EH_POINTER:
1994 	      /* The front end may have generated a call to
1995 		 __builtin_eh_pointer (0) within a catch region.  Replace
1996 		 this zero argument with the current catch region number.  */
1997 	      if (state->ehp_region)
1998 		{
1999 		  tree nr = build_int_cst (integer_type_node,
2000 					   state->ehp_region->index);
2001 		  gimple_call_set_arg (stmt, 0, nr);
2002 		}
2003 	      else
2004 		{
2005 		  /* The user has done something silly.  Remove it.  */
2006 		  rhs = null_pointer_node;
2007 		  goto do_replace;
2008 		}
2009 	      break;
2010 
2011 	    case BUILT_IN_EH_FILTER:
2012 	      /* ??? This should never appear, but since it's a builtin it
2013 		 is accessible to abuse by users.  Just remove it and
2014 		 replace the use with the arbitrary value zero.  */
2015 	      rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0);
2016 	    do_replace:
2017 	      lhs = gimple_call_lhs (stmt);
2018 	      x = gimple_build_assign (lhs, rhs);
2019 	      gsi_insert_before (gsi, x, GSI_SAME_STMT);
2020 	      /* FALLTHRU */
2021 
2022 	    case BUILT_IN_EH_COPY_VALUES:
2023 	      /* Likewise this should not appear.  Remove it.  */
2024 	      gsi_remove (gsi, true);
2025 	      return;
2026 
2027 	    default:
2028 	      break;
2029 	    }
2030       }
2031       /* FALLTHRU */
2032 
2033     case GIMPLE_ASSIGN:
2034       /* If the stmt can throw, use a new temporary for the assignment
2035          to a LHS.  This makes sure the old value of the LHS is
2036 	 available on the EH edge.  Only do so for statements that
2037 	 potentially fall through (no noreturn calls e.g.), otherwise
2038 	 this new assignment might create fake fallthru regions.  */
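      /* E.g., "lhs = call ();" where the call may throw becomes
	 "tmp = call (); lhs = tmp;", so the EH edge out of the call
	 still sees the old value of lhs.  */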
2039       if (stmt_could_throw_p (stmt)
2040 	  && gimple_has_lhs (stmt)
2041 	  && gimple_stmt_may_fallthru (stmt)
2042 	  && !tree_could_throw_p (gimple_get_lhs (stmt))
2043 	  && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
2044 	{
2045 	  tree lhs = gimple_get_lhs (stmt);
2046 	  tree tmp = create_tmp_var (TREE_TYPE (lhs));
2047 	  gimple *s = gimple_build_assign (lhs, tmp);
2048 	  gimple_set_location (s, gimple_location (stmt));
2049 	  gimple_set_block (s, gimple_block (stmt));
2050 	  gimple_set_lhs (stmt, tmp);
2051 	  if (TREE_CODE (TREE_TYPE (tmp)) == COMPLEX_TYPE
2052 	      || TREE_CODE (TREE_TYPE (tmp)) == VECTOR_TYPE)
2053 	    DECL_GIMPLE_REG_P (tmp) = 1;
2054 	  gsi_insert_after (gsi, s, GSI_SAME_STMT);
2055 	}
2056       /* Look for things that can throw exceptions, and record them.  */
2057       if (state->cur_region && stmt_could_throw_p (stmt))
2058 	{
2059 	  record_stmt_eh_region (state->cur_region, stmt);
2060 	  note_eh_region_may_contain_throw (state->cur_region);
2061 	}
2062       break;
2063 
2064     case GIMPLE_COND:
2065     case GIMPLE_GOTO:
2066     case GIMPLE_RETURN:
2067       maybe_record_in_goto_queue (state, stmt);
2068       break;
2069 
2070     case GIMPLE_SWITCH:
2071       verify_norecord_switch_expr (state, as_a <gswitch *> (stmt));
2072       break;
2073 
2074     case GIMPLE_TRY:
2075       {
2076 	gtry *try_stmt = as_a <gtry *> (stmt);
2077 	if (gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
2078 	  replace = lower_try_finally (state, try_stmt);
2079 	else
2080 	  {
2081 	    x = gimple_seq_first_stmt (gimple_try_cleanup (try_stmt));
2082 	    if (!x)
2083 	      {
2084 		replace = gimple_try_eval (try_stmt);
2085 		lower_eh_constructs_1 (state, &replace);
2086 	      }
2087 	    else
2088 	      switch (gimple_code (x))
2089 		{
2090 		case GIMPLE_CATCH:
2091 		  replace = lower_catch (state, try_stmt);
2092 		  break;
2093 		case GIMPLE_EH_FILTER:
2094 		  replace = lower_eh_filter (state, try_stmt);
2095 		  break;
2096 		case GIMPLE_EH_MUST_NOT_THROW:
2097 		  replace = lower_eh_must_not_throw (state, try_stmt);
2098 		  break;
2099 		case GIMPLE_EH_ELSE:
2100 		  /* This code is only valid with GIMPLE_TRY_FINALLY.  */
2101 		  gcc_unreachable ();
2102 		default:
2103 		  replace = lower_cleanup (state, try_stmt);
2104 		  break;
2105 		}
2106 	  }
2107       }
2108 
2109       /* Remove the old stmt and insert the transformed sequence
2110 	 instead. */
2111       gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT);
2112       gsi_remove (gsi, true);
2113 
2114       /* Return since we don't want gsi_next ().  */
2115       return;
2116 
2117     case GIMPLE_EH_ELSE:
2118       /* We should be eliminating this in lower_try_finally et al.  */
2119       gcc_unreachable ();
2120 
2121     default:
2122       /* A type, a decl, or some kind of statement that we're not
2123 	 interested in.  Don't walk them.  */
2124       break;
2125     }
2126 
2127   gsi_next (gsi);
2128 }
2129 
2130 /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */
2131 
2132 static void
2133 lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq)
2134 {
2135   gimple_stmt_iterator gsi;
2136   for (gsi = gsi_start (*pseq); !gsi_end_p (gsi);)
2137     lower_eh_constructs_2 (state, &gsi);
2138 }
2139 
2140 namespace {
2141 
2142 const pass_data pass_data_lower_eh =
2143 {
2144   GIMPLE_PASS, /* type */
2145   "eh", /* name */
2146   OPTGROUP_NONE, /* optinfo_flags */
2147   TV_TREE_EH, /* tv_id */
2148   PROP_gimple_lcf, /* properties_required */
2149   PROP_gimple_leh, /* properties_provided */
2150   0, /* properties_destroyed */
2151   0, /* todo_flags_start */
2152   0, /* todo_flags_finish */
2153 };
2154 
2155 class pass_lower_eh : public gimple_opt_pass
2156 {
2157 public:
2158   pass_lower_eh (gcc::context *ctxt)
2159     : gimple_opt_pass (pass_data_lower_eh, ctxt)
2160   {}
2161 
2162   /* opt_pass methods: */
2163   virtual unsigned int execute (function *);
2164 
2165 }; // class pass_lower_eh
2166 
2167 unsigned int
2168 pass_lower_eh::execute (function *fun)
2169 {
2170   struct leh_state null_state;
2171   gimple_seq bodyp;
2172 
2173   bodyp = gimple_body (current_function_decl);
2174   if (bodyp == NULL)
2175     return 0;
2176 
2177   finally_tree = new hash_table<finally_tree_hasher> (31);
2178   eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL);
2179   memset (&null_state, 0, sizeof (null_state));
2180 
2181   collect_finally_tree_1 (bodyp, NULL);
2182   lower_eh_constructs_1 (&null_state, &bodyp);
2183   gimple_set_body (current_function_decl, bodyp);
2184 
2185   /* We assume there's a return statement, or something, at the end of
2186      the function, and thus plopping the EH sequence afterward won't
2187      change anything.  */
2188   gcc_assert (!gimple_seq_may_fallthru (bodyp));
2189   gimple_seq_add_seq (&bodyp, eh_seq);
2190 
2191   /* We assume that since BODYP already existed, adding EH_SEQ to it
2192      didn't change its value, and we don't have to re-set the function.  */
2193   gcc_assert (bodyp == gimple_body (current_function_decl));
2194 
2195   delete finally_tree;
2196   finally_tree = NULL;
2197   BITMAP_FREE (eh_region_may_contain_throw_map);
2198   eh_seq = NULL;
2199 
2200   /* If this function needs a language specific EH personality routine
2201      and the frontend didn't already set one do so now.  */
2202   if (function_needs_eh_personality (fun) == eh_personality_lang
2203       && !DECL_FUNCTION_PERSONALITY (current_function_decl))
2204     DECL_FUNCTION_PERSONALITY (current_function_decl)
2205       = lang_hooks.eh_personality ();
2206 
2207   return 0;
2208 }
2209 
2210 } // anon namespace
2211 
2212 gimple_opt_pass *
2213 make_pass_lower_eh (gcc::context *ctxt)
2214 {
2215   return new pass_lower_eh (ctxt);
2216 }
2217 
2218 /* Create the multiple edges from an EH_DISPATCH statement to all of
2219    the possible handlers for its EH region.  Return true if there's
2220    no fallthru edge; false if there is.  */
2221 
2222 bool
2223 make_eh_dispatch_edges (geh_dispatch *stmt)
2224 {
2225   eh_region r;
2226   eh_catch c;
2227   basic_block src, dst;
2228 
2229   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2230   src = gimple_bb (stmt);
2231 
2232   switch (r->type)
2233     {
2234     case ERT_TRY:
2235       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2236 	{
2237 	  dst = label_to_block (c->label);
2238 	  make_edge (src, dst, 0);
2239 
2240 	  /* A catch-all handler doesn't have a fallthru.  */
2241 	  if (c->type_list == NULL)
2242 	    return false;
2243 	}
2244       break;
2245 
2246     case ERT_ALLOWED_EXCEPTIONS:
2247       dst = label_to_block (r->u.allowed.label);
2248       make_edge (src, dst, 0);
2249       break;
2250 
2251     default:
2252       gcc_unreachable ();
2253     }
2254 
2255   return true;
2256 }
2257 
2258 /* Create the single EH edge from STMT to its nearest landing pad,
2259    if there is such a landing pad within the current function.  */
2260 
2261 void
2262 make_eh_edges (gimple *stmt)
2263 {
2264   basic_block src, dst;
2265   eh_landing_pad lp;
2266   int lp_nr;
2267 
2268   lp_nr = lookup_stmt_eh_lp (stmt);
2269   if (lp_nr <= 0)
2270     return;
2271 
2272   lp = get_eh_landing_pad_from_number (lp_nr);
2273   gcc_assert (lp != NULL);
2274 
2275   src = gimple_bb (stmt);
2276   dst = label_to_block (lp->post_landing_pad);
2277   make_edge (src, dst, EDGE_EH);
2278 }
2279 
2280 /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree;
2281    do not actually perform the final edge redirection.
2282 
2283    CHANGE_REGION is true when we're being called from cleanup_empty_eh and
2284    we intend to change the destination EH region as well; this means
2285    EH_LANDING_PAD_NR must already be set on the destination block label.
2286    If false, we're being called from generic cfg manipulation code and we
2287    should preserve our place within the region tree.  */
2288 
2289 static void
2290 redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region)
2291 {
2292   eh_landing_pad old_lp, new_lp;
2293   basic_block old_bb;
2294   gimple *throw_stmt;
2295   int old_lp_nr, new_lp_nr;
2296   tree old_label, new_label;
2297   edge_iterator ei;
2298   edge e;
2299 
2300   old_bb = edge_in->dest;
2301   old_label = gimple_block_label (old_bb);
2302   old_lp_nr = EH_LANDING_PAD_NR (old_label);
2303   gcc_assert (old_lp_nr > 0);
2304   old_lp = get_eh_landing_pad_from_number (old_lp_nr);
2305 
2306   throw_stmt = last_stmt (edge_in->src);
2307   gcc_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr);
2308 
2309   new_label = gimple_block_label (new_bb);
2310 
2311   /* Look for an existing region that might be using NEW_BB already.  */
2312   new_lp_nr = EH_LANDING_PAD_NR (new_label);
2313   if (new_lp_nr)
2314     {
2315       new_lp = get_eh_landing_pad_from_number (new_lp_nr);
2316       gcc_assert (new_lp);
2317 
2318       /* Unless CHANGE_REGION is true, the new and old landing pad
2319 	 had better be associated with the same EH region.  */
2320       gcc_assert (change_region || new_lp->region == old_lp->region);
2321     }
2322   else
2323     {
2324       new_lp = NULL;
2325       gcc_assert (!change_region);
2326     }
2327 
2328   /* Notice when we redirect the last EH edge away from OLD_BB.  */
2329   FOR_EACH_EDGE (e, ei, old_bb->preds)
2330     if (e != edge_in && (e->flags & EDGE_EH))
2331       break;
2332 
2333   if (new_lp)
2334     {
2335       /* NEW_LP already exists.  If there are still edges into OLD_LP,
2336 	 there's nothing to do with the EH tree.  If there are no more
2337 	 edges into OLD_LP, then we want to remove OLD_LP as it is unused.
2338 	 If CHANGE_REGION is true, then our caller is expecting to remove
2339 	 the landing pad.  */
2340       if (e == NULL && !change_region)
2341 	remove_eh_landing_pad (old_lp);
2342     }
2343   else
2344     {
2345       /* No correct landing pad exists.  If there are no more edges
2346 	 into OLD_LP, then we can simply re-use the existing landing pad.
2347 	 Otherwise, we have to create a new landing pad.  */
2348       if (e == NULL)
2349 	{
2350 	  EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0;
2351 	  new_lp = old_lp;
2352 	}
2353       else
2354 	new_lp = gen_eh_landing_pad (old_lp->region);
2355       new_lp->post_landing_pad = new_label;
2356       EH_LANDING_PAD_NR (new_label) = new_lp->index;
2357     }
2358 
2359   /* Maybe move the throwing statement to the new region.  */
2360   if (old_lp != new_lp)
2361     {
2362       remove_stmt_from_eh_lp (throw_stmt);
2363       add_stmt_to_eh_lp (throw_stmt, new_lp->index);
2364     }
2365 }
2366 
2367 /* Redirect EH edge EDGE_IN to NEW_BB.  */
2368 
2369 edge
2370 redirect_eh_edge (edge edge_in, basic_block new_bb)
2371 {
2372   redirect_eh_edge_1 (edge_in, new_bb, false);
2373   return ssa_redirect_edge (edge_in, new_bb);
2374 }
2375 
2376 /* This is a subroutine of gimple_redirect_edge_and_branch.  Update the
2377    labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB.
2378    The actual edge update will happen in the caller.  */
2379 
2380 void
2381 redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb)
2382 {
2383   tree new_lab = gimple_block_label (new_bb);
2384   bool any_changed = false;
2385   basic_block old_bb;
2386   eh_region r;
2387   eh_catch c;
2388 
2389   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
2390   switch (r->type)
2391     {
2392     case ERT_TRY:
2393       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
2394 	{
2395 	  old_bb = label_to_block (c->label);
2396 	  if (old_bb == e->dest)
2397 	    {
2398 	      c->label = new_lab;
2399 	      any_changed = true;
2400 	    }
2401 	}
2402       break;
2403 
2404     case ERT_ALLOWED_EXCEPTIONS:
2405       old_bb = label_to_block (r->u.allowed.label);
2406       gcc_assert (old_bb == e->dest);
2407       r->u.allowed.label = new_lab;
2408       any_changed = true;
2409       break;
2410 
2411     default:
2412       gcc_unreachable ();
2413     }
2414 
2415   gcc_assert (any_changed);
2416 }
2417 
2418 /* Helper function for operation_could_trap_p and stmt_could_throw_p.  */
2419 
2420 bool
2421 operation_could_trap_helper_p (enum tree_code op,
2422 			       bool fp_operation,
2423 			       bool honor_trapv,
2424 			       bool honor_nans,
2425 			       bool honor_snans,
2426 			       tree divisor,
2427 			       bool *handled)
2428 {
2429   *handled = true;
2430   switch (op)
2431     {
2432     case TRUNC_DIV_EXPR:
2433     case CEIL_DIV_EXPR:
2434     case FLOOR_DIV_EXPR:
2435     case ROUND_DIV_EXPR:
2436     case EXACT_DIV_EXPR:
2437     case CEIL_MOD_EXPR:
2438     case FLOOR_MOD_EXPR:
2439     case ROUND_MOD_EXPR:
2440     case TRUNC_MOD_EXPR:
2441     case RDIV_EXPR:
2442       if (honor_snans)
2443 	return true;
2444       if (fp_operation)
2445 	return flag_trapping_math;
2446       if (!TREE_CONSTANT (divisor) || integer_zerop (divisor))
2447         return true;
2448       return false;
2449 
2450     case LT_EXPR:
2451     case LE_EXPR:
2452     case GT_EXPR:
2453     case GE_EXPR:
2454     case LTGT_EXPR:
2455       /* Some floating point comparisons may trap.  */
2456       return honor_nans;
2457 
2458     case EQ_EXPR:
2459     case NE_EXPR:
2460     case UNORDERED_EXPR:
2461     case ORDERED_EXPR:
2462     case UNLT_EXPR:
2463     case UNLE_EXPR:
2464     case UNGT_EXPR:
2465     case UNGE_EXPR:
2466     case UNEQ_EXPR:
2467       return honor_snans;
2468 
2469     case NEGATE_EXPR:
2470     case ABS_EXPR:
2471     case CONJ_EXPR:
2472       /* These operations don't trap with floating point.  */
2473       if (honor_trapv)
2474 	return true;
2475       return false;
2476 
2477     case PLUS_EXPR:
2478     case MINUS_EXPR:
2479     case MULT_EXPR:
2480       /* Any floating arithmetic may trap.  */
2481       if (fp_operation && flag_trapping_math)
2482 	return true;
2483       if (honor_trapv)
2484 	return true;
2485       return false;
2486 
2487     case COMPLEX_EXPR:
2488     case CONSTRUCTOR:
2489       /* Constructing an object cannot trap.  */
2490       return false;
2491 
2492     default:
2493       /* Any floating arithmetic may trap.  */
2494       if (fp_operation && flag_trapping_math)
2495 	return true;
2496 
2497       *handled = false;
2498       return false;
2499     }
2500 }
2501 
2502 /* Return true if operation OP may trap.  FP_OPERATION is true if OP is
2503    applied to floating-point values.  HONOR_TRAPV is true if OP is applied
2504    to integer-type operands whose overflow traps.  If OP is a division
2505    operator, DIVISOR contains the value of the divisor.  */
2506 
2507 bool
2508 operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv,
2509 			tree divisor)
2510 {
2511   bool honor_nans = (fp_operation && flag_trapping_math
2512 		     && !flag_finite_math_only);
2513   bool honor_snans = fp_operation && flag_signaling_nans != 0;
2514   bool handled;
2515 
2516   if (TREE_CODE_CLASS (op) != tcc_comparison
2517       && TREE_CODE_CLASS (op) != tcc_unary
2518       && TREE_CODE_CLASS (op) != tcc_binary
2519       && op != FMA_EXPR)
2520     return false;
2521 
2522   return operation_could_trap_helper_p (op, fp_operation, honor_trapv,
2523 					honor_nans, honor_snans, divisor,
2524 					&handled);
2525 }
2526 
2527 
2528 /* Returns true if it is possible to prove that the index of
2529    an array access REF (an ARRAY_REF expression) falls into the
2530    array bounds.  */
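
/* E.g., for "int a[10]", the constant access "a[3]" can be proven to
   be in bounds, while "a[i]" with a variable index cannot.  */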
2531 
2532 static bool
2533 in_array_bounds_p (tree ref)
2534 {
2535   tree idx = TREE_OPERAND (ref, 1);
2536   tree min, max;
2537 
2538   if (TREE_CODE (idx) != INTEGER_CST)
2539     return false;
2540 
2541   min = array_ref_low_bound (ref);
2542   max = array_ref_up_bound (ref);
2543   if (!min
2544       || !max
2545       || TREE_CODE (min) != INTEGER_CST
2546       || TREE_CODE (max) != INTEGER_CST)
2547     return false;
2548 
2549   if (tree_int_cst_lt (idx, min)
2550       || tree_int_cst_lt (max, idx))
2551     return false;
2552 
2553   return true;
2554 }
2555 
2556 /* Returns true if it is possible to prove that the range of
2557    an array access REF (an ARRAY_RANGE_REF expression) falls
2558    into the array bounds.  */
2559 
2560 static bool
2561 range_in_array_bounds_p (tree ref)
2562 {
2563   tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref));
2564   tree range_min, range_max, min, max;
2565 
2566   range_min = TYPE_MIN_VALUE (domain_type);
2567   range_max = TYPE_MAX_VALUE (domain_type);
2568   if (!range_min
2569       || !range_max
2570       || TREE_CODE (range_min) != INTEGER_CST
2571       || TREE_CODE (range_max) != INTEGER_CST)
2572     return false;
2573 
2574   min = array_ref_low_bound (ref);
2575   max = array_ref_up_bound (ref);
2576   if (!min
2577       || !max
2578       || TREE_CODE (min) != INTEGER_CST
2579       || TREE_CODE (max) != INTEGER_CST)
2580     return false;
2581 
2582   if (tree_int_cst_lt (range_min, min)
2583       || tree_int_cst_lt (max, range_max))
2584     return false;
2585 
2586   return true;
2587 }
2588 
2589 /* Return true if EXPR can trap, as in dereferencing an invalid pointer
2590    location or floating point arithmetic.  Cf. the RTL version, may_trap_p.
2591    This routine expects only GIMPLE lhs or rhs input.  */
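
/* E.g., "*p" may trap unless TREE_THIS_NOTRAP is set on it, and an
   array access may trap when it cannot be proven in bounds.  */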
2592 
2593 bool
2594 tree_could_trap_p (tree expr)
2595 {
2596   enum tree_code code;
2597   bool fp_operation = false;
2598   bool honor_trapv = false;
2599   tree t, base, div = NULL_TREE;
2600 
2601   if (!expr)
2602     return false;
2603 
2604   code = TREE_CODE (expr);
2605   t = TREE_TYPE (expr);
2606 
2607   if (t)
2608     {
2609       if (COMPARISON_CLASS_P (expr))
2610 	fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
2611       else
2612 	fp_operation = FLOAT_TYPE_P (t);
2613       honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t);
2614     }
2615 
2616   if (TREE_CODE_CLASS (code) == tcc_binary)
2617     div = TREE_OPERAND (expr, 1);
2618   if (operation_could_trap_p (code, fp_operation, honor_trapv, div))
2619     return true;
2620 
2621  restart:
2622   switch (code)
2623     {
2624     case COMPONENT_REF:
2625     case REALPART_EXPR:
2626     case IMAGPART_EXPR:
2627     case BIT_FIELD_REF:
2628     case VIEW_CONVERT_EXPR:
2629     case WITH_SIZE_EXPR:
2630       expr = TREE_OPERAND (expr, 0);
2631       code = TREE_CODE (expr);
2632       goto restart;
2633 
2634     case ARRAY_RANGE_REF:
2635       base = TREE_OPERAND (expr, 0);
2636       if (tree_could_trap_p (base))
2637 	return true;
2638       if (TREE_THIS_NOTRAP (expr))
2639 	return false;
2640       return !range_in_array_bounds_p (expr);
2641 
2642     case ARRAY_REF:
2643       base = TREE_OPERAND (expr, 0);
2644       if (tree_could_trap_p (base))
2645 	return true;
2646       if (TREE_THIS_NOTRAP (expr))
2647 	return false;
2648       return !in_array_bounds_p (expr);
2649 
2650     case TARGET_MEM_REF:
2651     case MEM_REF:
2652       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
2653 	  && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0)))
2654 	return true;
2655       if (TREE_THIS_NOTRAP (expr))
2656 	return false;
2657       /* We cannot prove that the access is in-bounds when we have
2658          variable-index TARGET_MEM_REFs.  */
2659       if (code == TARGET_MEM_REF
2660 	  && (TMR_INDEX (expr) || TMR_INDEX2 (expr)))
2661 	return true;
2662       if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
2663 	{
2664 	  tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
2665 	  offset_int off = mem_ref_offset (expr);
2666 	  if (wi::neg_p (off, SIGNED))
2667 	    return true;
2668 	  if (TREE_CODE (base) == STRING_CST)
2669 	    return wi::leu_p (TREE_STRING_LENGTH (base), off);
2670 	  else if (DECL_SIZE_UNIT (base) == NULL_TREE
2671 		   || TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
2672 		   || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
2673 	    return true;
2674 	  /* Now we are sure the first byte of the access is inside
2675 	     the object.  */
2676 	  return false;
2677 	}
2678       return true;
2679 
2680     case INDIRECT_REF:
2681       return !TREE_THIS_NOTRAP (expr);
2682 
2683     case ASM_EXPR:
2684       return TREE_THIS_VOLATILE (expr);
2685 
2686     case CALL_EXPR:
2687       t = get_callee_fndecl (expr);
2688       /* Assume that calls to weak functions may trap.  */
2689       if (!t || !DECL_P (t))
2690 	return true;
2691       if (DECL_WEAK (t))
2692 	return tree_could_trap_p (t);
2693       return false;
2694 
2695     case FUNCTION_DECL:
2696       /* Assume that accesses to weak functions may trap, unless we know
2697 	 they are certainly defined in current TU or in some other
2698 	 LTO partition.  */
2699       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2700 	{
2701 	  cgraph_node *node = cgraph_node::get (expr);
2702 	  if (node)
2703 	    node = node->function_symbol ();
2704 	  return !(node && node->in_other_partition);
2705 	}
2706       return false;
2707 
2708     case VAR_DECL:
2709       /* Assume that accesses to weak vars may trap, unless we know
2710 	 they are certainly defined in current TU or in some other
2711 	 LTO partition.  */
2712       if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr))
2713 	{
2714 	  varpool_node *node = varpool_node::get (expr);
2715 	  if (node)
2716 	    node = node->ultimate_alias_target ();
2717 	  return !(node && node->in_other_partition);
2718 	}
2719       return false;
2720 
2721     default:
2722       return false;
2723     }
2724 }
2725 
2726 /* Return non-NULL if there is an integer operation with trapping overflow
2727    we can rewrite into non-trapping.  Called via walk_tree from
2728    rewrite_to_non_trapping_overflow.  */
2729 
2730 static tree
2731 find_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2732 {
2733   if (EXPR_P (*tp)
2734       && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp)))
2735     return *tp;
2736   if (IS_TYPE_OR_DECL_P (*tp)
2737       || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL))
2738     *walk_subtrees = 0;
2739   return NULL_TREE;
2740 }
2741 
2742 /* Rewrite selected operations into unsigned arithmetic, so that they
2743    don't trap on overflow.  */
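
/* For instance (a sketch of the rewrite, not tied to a particular
   source construct): with -ftrapv, a signed "a + b" becomes
   "(int) ((unsigned) a + (unsigned) b)", which wraps rather than
   traps on overflow.  */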
2744 
2745 static tree
2746 replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data)
2747 {
2748   if (find_trapping_overflow (tp, walk_subtrees, data))
2749     {
2750       tree type = TREE_TYPE (*tp);
2751       tree utype = unsigned_type_for (type);
2752       *walk_subtrees = 0;
2753       int len = TREE_OPERAND_LENGTH (*tp);
2754       for (int i = 0; i < len; ++i)
2755 	walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow,
2756 		   data, (hash_set<tree> *) data);
2757 
2758       if (TREE_CODE (*tp) == ABS_EXPR)
2759 	{
2760 	  tree op = TREE_OPERAND (*tp, 0);
2761 	  op = save_expr (op);
2762 	  /* save_expr skips simple arithmetic, which is undesirable
2763 	     here, if it might trap due to flag_trapv.  We need to
2764 	     force a SAVE_EXPR in the COND_EXPR condition, to evaluate
2765 	     it before the comparison.  */
2766 	  if (EXPR_P (op)
2767 	      && TREE_CODE (op) != SAVE_EXPR
2768 	      && walk_tree (&op, find_trapping_overflow, NULL, NULL))
2769 	    {
2770 	      op = build1_loc (EXPR_LOCATION (op), SAVE_EXPR, type, op);
2771 	      TREE_SIDE_EFFECTS (op) = 1;
2772 	    }
2773 	  /* Change abs (op) to op < 0 ? -op : op and handle the NEGATE_EXPR
2774 	     like other signed integer trapping operations.  */
2775 	  tree cond = fold_build2 (LT_EXPR, boolean_type_node,
2776 				   op, build_int_cst (type, 0));
2777 	  tree neg = fold_build1 (NEGATE_EXPR, utype,
2778 				  fold_convert (utype, op));
2779 	  *tp = fold_build3 (COND_EXPR, type, cond,
2780 			     fold_convert (type, neg), op);
2781 	}
2782       else
2783 	{
2784 	  TREE_TYPE (*tp) = utype;
2785 	  len = TREE_OPERAND_LENGTH (*tp);
2786 	  for (int i = 0; i < len; ++i)
2787 	    TREE_OPERAND (*tp, i)
2788 	      = fold_convert (utype, TREE_OPERAND (*tp, i));
2789 	  *tp = fold_convert (type, *tp);
2790 	}
2791     }
2792   return NULL_TREE;
2793 }
2794 
2795 /* If any subexpression of EXPR can trap due to -ftrapv, rewrite it
2796    using unsigned arithmetic to avoid traps in it.  */
2797 
2798 tree
2799 rewrite_to_non_trapping_overflow (tree expr)
2800 {
2801   if (!flag_trapv)
2802     return expr;
2803   hash_set<tree> pset;
2804   if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset))
2805     return expr;
2806   expr = unshare_expr (expr);
2807   hash_set<tree> pset2;
2808   walk_tree (&expr, replace_trapping_overflow, &pset2, &pset2);
2809   return expr;
2810 }
2811 
2812 /* Helper for stmt_could_throw_p.  Return true if STMT (assumed to be
2813    an assignment) may throw.  */
2814 
2815 static bool
2816 stmt_could_throw_1_p (gassign *stmt)
2817 {
2818   enum tree_code code = gimple_assign_rhs_code (stmt);
2819   bool honor_nans = false;
2820   bool honor_snans = false;
2821   bool fp_operation = false;
2822   bool honor_trapv = false;
2823   tree t;
2824   size_t i;
2825   bool handled, ret;
2826 
2827   if (TREE_CODE_CLASS (code) == tcc_comparison
2828       || TREE_CODE_CLASS (code) == tcc_unary
2829       || TREE_CODE_CLASS (code) == tcc_binary
2830       || code == FMA_EXPR)
2831     {
2832       if (TREE_CODE_CLASS (code) == tcc_comparison)
2833 	t = TREE_TYPE (gimple_assign_rhs1 (stmt));
2834       else
2835 	t = gimple_expr_type (stmt);
2836       fp_operation = FLOAT_TYPE_P (t);
2837       if (fp_operation)
2838 	{
2839 	  honor_nans = flag_trapping_math && !flag_finite_math_only;
2840 	  honor_snans = flag_signaling_nans != 0;
2841 	}
2842       else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t))
2843 	honor_trapv = true;
2844     }
2845 
2846   /* First check the LHS.  */
2847   if (tree_could_trap_p (gimple_assign_lhs (stmt)))
2848     return true;
2849 
2850   /* Check if the main expression may trap.  */
2851   ret = operation_could_trap_helper_p (code, fp_operation, honor_trapv,
2852 				       honor_nans, honor_snans,
2853 				       gimple_assign_rhs2 (stmt),
2854 				       &handled);
2855   if (handled)
2856     return ret;
2857 
2858   /* If the expression does not trap, see if any of the individual operands may
2859      trap.  */
2860   for (i = 1; i < gimple_num_ops (stmt); i++)
2861     if (tree_could_trap_p (gimple_op (stmt, i)))
2862       return true;
2863 
2864   return false;
2865 }
2866 
2867 
2868 /* Return true if statement STMT could throw an exception.  */
2869 
2870 bool
2871 stmt_could_throw_p (gimple *stmt)
2872 {
2873   if (!flag_exceptions)
2874     return false;
2875 
2876   /* The only statements that can throw an exception are assignments,
2877      conditionals, calls, resx, and asms.  */
2878   switch (gimple_code (stmt))
2879     {
2880     case GIMPLE_RESX:
2881       return true;
2882 
2883     case GIMPLE_CALL:
2884       return !gimple_call_nothrow_p (as_a <gcall *> (stmt));
2885 
2886     case GIMPLE_COND:
2887       {
2888 	if (!cfun->can_throw_non_call_exceptions)
2889 	  return false;
2890 	gcond *cond = as_a <gcond *> (stmt);
2891 	tree lhs = gimple_cond_lhs (cond);
2892 	return operation_could_trap_p (gimple_cond_code (cond),
2893 				       FLOAT_TYPE_P (TREE_TYPE (lhs)),
2894 				       false, NULL_TREE);
2895       }
2896 
2897     case GIMPLE_ASSIGN:
2898       if (!cfun->can_throw_non_call_exceptions
2899 	  || gimple_clobber_p (stmt))
2900         return false;
2901       return stmt_could_throw_1_p (as_a <gassign *> (stmt));
2902 
2903     case GIMPLE_ASM:
2904       if (!cfun->can_throw_non_call_exceptions)
2905         return false;
2906       return gimple_asm_volatile_p (as_a <gasm *> (stmt));
2907 
2908     default:
2909       return false;
2910     }
2911 }
2912 
2913 
2914 /* Return true if expression T could throw an exception.  */
2915 
2916 bool
2917 tree_could_throw_p (tree t)
2918 {
2919   if (!flag_exceptions)
2920     return false;
2921   if (TREE_CODE (t) == MODIFY_EXPR)
2922     {
2923       if (cfun->can_throw_non_call_exceptions
2924           && tree_could_trap_p (TREE_OPERAND (t, 0)))
2925         return true;
2926       t = TREE_OPERAND (t, 1);
2927     }
2928 
2929   if (TREE_CODE (t) == WITH_SIZE_EXPR)
2930     t = TREE_OPERAND (t, 0);
2931   if (TREE_CODE (t) == CALL_EXPR)
2932     return (call_expr_flags (t) & ECF_NOTHROW) == 0;
2933   if (cfun->can_throw_non_call_exceptions)
2934     return tree_could_trap_p (t);
2935   return false;
2936 }
2937 
2938 /* Return true if STMT can throw an exception that is not caught within
2939    the current function (CFUN).  */
2940 
2941 bool
2942 stmt_can_throw_external (gimple *stmt)
2943 {
2944   int lp_nr;
2945 
2946   if (!stmt_could_throw_p (stmt))
2947     return false;
2948 
2949   lp_nr = lookup_stmt_eh_lp (stmt);
2950   return lp_nr == 0;
2951 }
2952 
2953 /* Return true if STMT can throw an exception that is caught within
2954    the current function (CFUN).  */
2955 
2956 bool
2957 stmt_can_throw_internal (gimple *stmt)
2958 {
2959   int lp_nr;
2960 
2961   if (!stmt_could_throw_p (stmt))
2962     return false;
2963 
2964   lp_nr = lookup_stmt_eh_lp (stmt);
2965   return lp_nr > 0;
2966 }
2967 
2968 /* Given a statement STMT in IFUN, if STMT can no longer throw, then
2969    remove any entry it might have from the EH table.  Return true if
2970    any change was made.  */
2971 
2972 bool
2973 maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt)
2974 {
2975   if (stmt_could_throw_p (stmt))
2976     return false;
2977   return remove_stmt_from_eh_lp_fn (ifun, stmt);
2978 }
2979 
2980 /* Likewise, but always use the current function.  */
2981 
2982 bool
2983 maybe_clean_eh_stmt (gimple *stmt)
2984 {
2985   return maybe_clean_eh_stmt_fn (cfun, stmt);
2986 }
2987 
2988 /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced
2989    OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT
2990    in the table if it should be in there.  Return TRUE if a replacement was
2991    done that may require an EH edge purge.  */
2992 
2993 bool
2994 maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt)
2995 {
2996   int lp_nr = lookup_stmt_eh_lp (old_stmt);
2997 
2998   if (lp_nr != 0)
2999     {
3000       bool new_stmt_could_throw = stmt_could_throw_p (new_stmt);
3001 
3002       if (new_stmt == old_stmt && new_stmt_could_throw)
3003 	return false;
3004 
3005       remove_stmt_from_eh_lp (old_stmt);
3006       if (new_stmt_could_throw)
3007 	{
3008 	  add_stmt_to_eh_lp (new_stmt, lp_nr);
3009 	  return false;
3010 	}
3011       else
3012 	return true;
3013     }
3014 
3015   return false;
3016 }
3017 
3018 /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT
3019    in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT.  The MAP
3020    operand is the return value of duplicate_eh_regions.  */
3021 
3022 bool
3023 maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt,
3024 			    struct function *old_fun, gimple *old_stmt,
3025 			    hash_map<void *, void *> *map,
3026 			    int default_lp_nr)
3027 {
3028   int old_lp_nr, new_lp_nr;
3029 
3030   if (!stmt_could_throw_p (new_stmt))
3031     return false;
3032 
3033   old_lp_nr = lookup_stmt_eh_lp_fn (old_fun, old_stmt);
3034   if (old_lp_nr == 0)
3035     {
3036       if (default_lp_nr == 0)
3037 	return false;
3038       new_lp_nr = default_lp_nr;
3039     }
3040   else if (old_lp_nr > 0)
3041     {
3042       eh_landing_pad old_lp, new_lp;
3043 
3044       old_lp = (*old_fun->eh->lp_array)[old_lp_nr];
3045       new_lp = static_cast<eh_landing_pad> (*map->get (old_lp));
3046       new_lp_nr = new_lp->index;
3047     }
3048   else
3049     {
3050       eh_region old_r, new_r;
3051 
3052       old_r = (*old_fun->eh->region_array)[-old_lp_nr];
3053       new_r = static_cast<eh_region> (*map->get (old_r));
3054       new_lp_nr = -new_r->index;
3055     }
3056 
3057   add_stmt_to_eh_lp_fn (new_fun, new_stmt, new_lp_nr);
3058   return true;
3059 }
3060 
3061 /* Similar, but both OLD_STMT and NEW_STMT are within the current function,
3062    and thus no remapping is required.  */
3063 
3064 bool
3065 maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt)
3066 {
3067   int lp_nr;
3068 
3069   if (!stmt_could_throw_p (new_stmt))
3070     return false;
3071 
3072   lp_nr = lookup_stmt_eh_lp (old_stmt);
3073   if (lp_nr == 0)
3074     return false;
3075 
3076   add_stmt_to_eh_lp (new_stmt, lp_nr);
3077   return true;
3078 }
3079 
3080 /* Returns TRUE if oneh and twoh are exception handlers (gimple_try_cleanup of
3081    GIMPLE_TRY) that are similar enough to be considered the same.  Currently
3082    this only handles handlers consisting of a single call, as that's the
3083    important case for C++: a destructor call for a particular object showing
3084    up in multiple handlers.  */
3085 
3086 static bool
3087 same_handler_p (gimple_seq oneh, gimple_seq twoh)
3088 {
3089   gimple_stmt_iterator gsi;
3090   gimple *ones, *twos;
3091   unsigned int ai;
3092 
3093   gsi = gsi_start (oneh);
3094   if (!gsi_one_before_end_p (gsi))
3095     return false;
3096   ones = gsi_stmt (gsi);
3097 
3098   gsi = gsi_start (twoh);
3099   if (!gsi_one_before_end_p (gsi))
3100     return false;
3101   twos = gsi_stmt (gsi);
3102 
3103   if (!is_gimple_call (ones)
3104       || !is_gimple_call (twos)
3105       || gimple_call_lhs (ones)
3106       || gimple_call_lhs (twos)
3107       || gimple_call_chain (ones)
3108       || gimple_call_chain (twos)
3109       || !gimple_call_same_target_p (ones, twos)
3110       || gimple_call_num_args (ones) != gimple_call_num_args (twos))
3111     return false;
3112 
3113   for (ai = 0; ai < gimple_call_num_args (ones); ++ai)
3114     if (!operand_equal_p (gimple_call_arg (ones, ai),
3115                           gimple_call_arg (twos, ai), 0))
3116       return false;
3117 
3118   return true;
3119 }
3120 
3121 /* Optimize
3122     try { A() } finally { try { ~B() } catch { ~A() } }
3123     try { ... } finally { ~A() }
3124    into
3125     try { A() } catch { ~B() }
3126     try { ~B() ... } finally { ~A() }
3127 
3128    This occurs frequently in C++, where A is a local variable and B is a
3129    temporary used in the initializer for A.  */
3130 
3131 static void
3132 optimize_double_finally (gtry *one, gtry *two)
3133 {
3134   gimple *oneh;
3135   gimple_stmt_iterator gsi;
3136   gimple_seq cleanup;
3137 
3138   cleanup = gimple_try_cleanup (one);
3139   gsi = gsi_start (cleanup);
3140   if (!gsi_one_before_end_p (gsi))
3141     return;
3142 
3143   oneh = gsi_stmt (gsi);
3144   if (gimple_code (oneh) != GIMPLE_TRY
3145       || gimple_try_kind (oneh) != GIMPLE_TRY_CATCH)
3146     return;
3147 
3148   if (same_handler_p (gimple_try_cleanup (oneh), gimple_try_cleanup (two)))
3149     {
3150       gimple_seq seq = gimple_try_eval (oneh);
3151 
3152       gimple_try_set_cleanup (one, seq);
3153       gimple_try_set_kind (one, GIMPLE_TRY_CATCH);
3154       seq = copy_gimple_seq_and_replace_locals (seq);
3155       gimple_seq_add_seq (&seq, gimple_try_eval (two));
3156       gimple_try_set_eval (two, seq);
3157     }
3158 }
3159 
3160 /* Perform EH refactoring optimizations that are simpler to do when code
3161    flow has been lowered but EH structures haven't.  */
3162 
3163 static void
3164 refactor_eh_r (gimple_seq seq)
3165 {
3166   gimple_stmt_iterator gsi;
3167   gimple *one, *two;
3168 
3169   one = NULL;
3170   two = NULL;
3171   gsi = gsi_start (seq);
3172   while (1)
3173     {
3174       one = two;
3175       if (gsi_end_p (gsi))
3176 	two = NULL;
3177       else
3178 	two = gsi_stmt (gsi);
3179       if (one && two)
3180 	if (gtry *try_one = dyn_cast <gtry *> (one))
3181 	  if (gtry *try_two = dyn_cast <gtry *> (two))
3182 	    if (gimple_try_kind (try_one) == GIMPLE_TRY_FINALLY
3183 		&& gimple_try_kind (try_two) == GIMPLE_TRY_FINALLY)
3184 	      optimize_double_finally (try_one, try_two);
3185       if (one)
3186 	switch (gimple_code (one))
3187 	  {
3188 	  case GIMPLE_TRY:
3189 	    refactor_eh_r (gimple_try_eval (one));
3190 	    refactor_eh_r (gimple_try_cleanup (one));
3191 	    break;
3192 	  case GIMPLE_CATCH:
3193 	    refactor_eh_r (gimple_catch_handler (as_a <gcatch *> (one)));
3194 	    break;
3195 	  case GIMPLE_EH_FILTER:
3196 	    refactor_eh_r (gimple_eh_filter_failure (one));
3197 	    break;
3198 	  case GIMPLE_EH_ELSE:
3199 	    {
3200 	      geh_else *eh_else_stmt = as_a <geh_else *> (one);
3201 	      refactor_eh_r (gimple_eh_else_n_body (eh_else_stmt));
3202 	      refactor_eh_r (gimple_eh_else_e_body (eh_else_stmt));
3203 	    }
3204 	    break;
3205 	  default:
3206 	    break;
3207 	  }
3208       if (two)
3209 	gsi_next (&gsi);
3210       else
3211 	break;
3212     }
3213 }
3214 
3215 namespace {
3216 
3217 const pass_data pass_data_refactor_eh =
3218 {
3219   GIMPLE_PASS, /* type */
3220   "ehopt", /* name */
3221   OPTGROUP_NONE, /* optinfo_flags */
3222   TV_TREE_EH, /* tv_id */
3223   PROP_gimple_lcf, /* properties_required */
3224   0, /* properties_provided */
3225   0, /* properties_destroyed */
3226   0, /* todo_flags_start */
3227   0, /* todo_flags_finish */
3228 };
3229 
3230 class pass_refactor_eh : public gimple_opt_pass
3231 {
3232 public:
3233   pass_refactor_eh (gcc::context *ctxt)
3234     : gimple_opt_pass (pass_data_refactor_eh, ctxt)
3235   {}
3236 
3237   /* opt_pass methods: */
3238   virtual bool gate (function *) { return flag_exceptions != 0; }
3239   virtual unsigned int execute (function *)
3240     {
3241       refactor_eh_r (gimple_body (current_function_decl));
3242       return 0;
3243     }
3244 
3245 }; // class pass_refactor_eh
3246 
3247 } // anon namespace
3248 
3249 gimple_opt_pass *
3250 make_pass_refactor_eh (gcc::context *ctxt)
3251 {
3252   return new pass_refactor_eh (ctxt);
3253 }
3254 
3255 /* At the end of gimple optimization, we can lower RESX.  */
3256 
3257 static bool
3258 lower_resx (basic_block bb, gresx *stmt,
3259 	    hash_map<eh_region, tree> *mnt_map)
3260 {
3261   int lp_nr;
3262   eh_region src_r, dst_r;
3263   gimple_stmt_iterator gsi;
3264   gimple *x;
3265   tree fn, src_nr;
3266   bool ret = false;
3267 
3268   lp_nr = lookup_stmt_eh_lp (stmt);
3269   if (lp_nr != 0)
3270     dst_r = get_eh_region_from_lp_number (lp_nr);
3271   else
3272     dst_r = NULL;
3273 
3274   src_r = get_eh_region_from_number (gimple_resx_region (stmt));
3275   gsi = gsi_last_bb (bb);
3276 
3277   if (src_r == NULL)
3278     {
3279       /* We can wind up with no source region when pass_cleanup_eh shows
3280 	 that there are no entries into an eh region and deletes it, but
3281 	 then the block that contains the resx isn't removed.  This can
3282 	 happen without optimization when the switch statement created by
3283 	 lower_try_finally_switch isn't simplified to remove the eh case.
3284 
3285 	 Resolve this by expanding the resx node to an abort.  */
3286 
3287       fn = builtin_decl_implicit (BUILT_IN_TRAP);
3288       x = gimple_build_call (fn, 0);
3289       gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3290 
3291       while (EDGE_COUNT (bb->succs) > 0)
3292 	remove_edge (EDGE_SUCC (bb, 0));
3293     }
3294   else if (dst_r)
3295     {
3296       /* When we have a destination region, we resolve this by copying
3297 	 the excptr and filter values into place, and changing the edge
3298 	 to immediately after the landing pad.  */
3299       edge e;
3300 
3301       if (lp_nr < 0)
3302 	{
3303 	  basic_block new_bb;
3304 	  tree lab;
3305 
3306 	  /* We are resuming into a MUST_NOT_THROW region.  Expand a call to
3307 	     the failure decl into a new block, if needed.  */
3308 	  gcc_assert (dst_r->type == ERT_MUST_NOT_THROW);
3309 
3310 	  tree *slot = mnt_map->get (dst_r);
3311 	  if (slot == NULL)
3312 	    {
3313 	      gimple_stmt_iterator gsi2;
3314 
3315 	      new_bb = create_empty_bb (bb);
3316 	      add_bb_to_loop (new_bb, bb->loop_father);
3317 	      lab = gimple_block_label (new_bb);
3318 	      gsi2 = gsi_start_bb (new_bb);
3319 
3320 	      fn = dst_r->u.must_not_throw.failure_decl;
3321 	      x = gimple_build_call (fn, 0);
3322 	      gimple_set_location (x, dst_r->u.must_not_throw.failure_loc);
3323 	      gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING);
3324 
3325 	      mnt_map->put (dst_r, lab);
3326 	    }
3327 	  else
3328 	    {
3329 	      lab = *slot;
3330 	      new_bb = label_to_block (lab);
3331 	    }
3332 
3333 	  gcc_assert (EDGE_COUNT (bb->succs) == 0);
3334 	  e = make_edge (bb, new_bb, EDGE_FALLTHRU);
3335 	  e->count = bb->count;
3336 	  e->probability = REG_BR_PROB_BASE;
3337 	}
3338       else
3339 	{
3340 	  edge_iterator ei;
3341 	  tree dst_nr = build_int_cst (integer_type_node, dst_r->index);
3342 
3343 	  fn = builtin_decl_implicit (BUILT_IN_EH_COPY_VALUES);
3344 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3345 	  x = gimple_build_call (fn, 2, dst_nr, src_nr);
3346 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3347 
3348 	  /* Update the flags for the outgoing edge.  */
3349 	  e = single_succ_edge (bb);
3350 	  gcc_assert (e->flags & EDGE_EH);
3351 	  e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU;
3352 	  e->probability = REG_BR_PROB_BASE;
3353 	  e->count = bb->count;
3354 
3355 	  /* If there are no more EH users of the landing pad, delete it.  */
3356 	  FOR_EACH_EDGE (e, ei, e->dest->preds)
3357 	    if (e->flags & EDGE_EH)
3358 	      break;
3359 	  if (e == NULL)
3360 	    {
3361 	      eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
3362 	      remove_eh_landing_pad (lp);
3363 	    }
3364 	}
3365 
3366       ret = true;
3367     }
3368   else
3369     {
3370       tree var;
3371 
3372       /* When we don't have a destination region, this exception escapes
3373 	 up the call chain.  We resolve this by generating a call to the
3374 	 _Unwind_Resume library function.  */
3375 
3376       /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup
3377 	 with no arguments for C++ and Java.  Check for that.  */
3378       if (src_r->use_cxa_end_cleanup)
3379 	{
3380 	  fn = builtin_decl_implicit (BUILT_IN_CXA_END_CLEANUP);
3381 	  x = gimple_build_call (fn, 0);
3382 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3383 	}
3384       else
3385 	{
3386 	  fn = builtin_decl_implicit (BUILT_IN_EH_POINTER);
3387 	  src_nr = build_int_cst (integer_type_node, src_r->index);
3388 	  x = gimple_build_call (fn, 1, src_nr);
3389 	  var = create_tmp_var (ptr_type_node);
3390 	  var = make_ssa_name (var, x);
3391 	  gimple_call_set_lhs (x, var);
3392 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3393 
3394 	  /* When exception handling is delegated to a caller function, we
3395 	     have to guarantee that shadow memory variables living on the stack
3396 	     will be cleaned up before control is given to a parent function.  */
3397 	  if ((flag_sanitize & SANITIZE_ADDRESS) != 0
3398 	      && !lookup_attribute ("no_sanitize_address",
3399 				    DECL_ATTRIBUTES (current_function_decl)))
3400 	    {
3401 	      tree decl
3402 		= builtin_decl_implicit (BUILT_IN_ASAN_HANDLE_NO_RETURN);
3403 	      gimple *g = gimple_build_call (decl, 0);
3404 	      gimple_set_location (g, gimple_location (stmt));
3405 	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3406 	    }
3407 
3408 	  fn = builtin_decl_implicit (BUILT_IN_UNWIND_RESUME);
3409 	  x = gimple_build_call (fn, 1, var);
3410 	  gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3411 	}
3412 
3413       gcc_assert (EDGE_COUNT (bb->succs) == 0);
3414     }
3415 
3416   gsi_remove (&gsi, true);
3417 
3418   return ret;
3419 }
3420 
3421 namespace {
3422 
3423 const pass_data pass_data_lower_resx =
3424 {
3425   GIMPLE_PASS, /* type */
3426   "resx", /* name */
3427   OPTGROUP_NONE, /* optinfo_flags */
3428   TV_TREE_EH, /* tv_id */
3429   PROP_gimple_lcf, /* properties_required */
3430   0, /* properties_provided */
3431   0, /* properties_destroyed */
3432   0, /* todo_flags_start */
3433   0, /* todo_flags_finish */
3434 };
3435 
3436 class pass_lower_resx : public gimple_opt_pass
3437 {
3438 public:
3439   pass_lower_resx (gcc::context *ctxt)
3440     : gimple_opt_pass (pass_data_lower_resx, ctxt)
3441   {}
3442 
3443   /* opt_pass methods: */
3444   virtual bool gate (function *) { return flag_exceptions != 0; }
3445   virtual unsigned int execute (function *);
3446 
3447 }; // class pass_lower_resx
3448 
3449 unsigned
3450 pass_lower_resx::execute (function *fun)
3451 {
3452   basic_block bb;
3453   bool dominance_invalidated = false;
3454   bool any_rewritten = false;
3455 
3456   hash_map<eh_region, tree> mnt_map;
3457 
3458   FOR_EACH_BB_FN (bb, fun)
3459     {
3460       gimple *last = last_stmt (bb);
3461       if (last && is_gimple_resx (last))
3462 	{
3463 	  dominance_invalidated |=
3464 	    lower_resx (bb, as_a <gresx *> (last), &mnt_map);
3465 	  any_rewritten = true;
3466 	}
3467     }
3468 
3469   if (dominance_invalidated)
3470     {
3471       free_dominance_info (CDI_DOMINATORS);
3472       free_dominance_info (CDI_POST_DOMINATORS);
3473     }
3474 
3475   return any_rewritten ? TODO_update_ssa_only_virtuals : 0;
3476 }
3477 
3478 } // anon namespace
3479 
3480 gimple_opt_pass *
3481 make_pass_lower_resx (gcc::context *ctxt)
3482 {
3483   return new pass_lower_resx (ctxt);
3484 }
3485 
3486 /* Try to optimize var = {v} {CLOBBER} stmts followed just by
3487    external throw.  */
3488 
3489 static void
3490 optimize_clobbers (basic_block bb)
3491 {
3492   gimple_stmt_iterator gsi = gsi_last_bb (bb);
3493   bool any_clobbers = false;
3494   bool seen_stack_restore = false;
3495   edge_iterator ei;
3496   edge e;
3497 
3498   /* Only optimize anything if the bb contains at least one clobber,
3499      ends with resx (checked by caller), optionally contains some
3500      debug stmts or labels and at most one __builtin_stack_restore
3501      call, and has an incoming EH edge.  */
3502   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3503     {
3504       gimple *stmt = gsi_stmt (gsi);
3505       if (is_gimple_debug (stmt))
3506 	continue;
3507       if (gimple_clobber_p (stmt))
3508 	{
3509 	  any_clobbers = true;
3510 	  continue;
3511 	}
3512       if (!seen_stack_restore
3513 	  && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
3514 	{
3515 	  seen_stack_restore = true;
3516 	  continue;
3517 	}
3518       if (gimple_code (stmt) == GIMPLE_LABEL)
3519 	break;
3520       return;
3521     }
3522   if (!any_clobbers)
3523     return;
3524   FOR_EACH_EDGE (e, ei, bb->preds)
3525     if (e->flags & EDGE_EH)
3526       break;
3527   if (e == NULL)
3528     return;
3529   gsi = gsi_last_bb (bb);
3530   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3531     {
3532       gimple *stmt = gsi_stmt (gsi);
3533       if (!gimple_clobber_p (stmt))
3534 	continue;
3535       unlink_stmt_vdef (stmt);
3536       gsi_remove (&gsi, true);
3537       release_defs (stmt);
3538     }
3539 }
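
/* For illustration only: a block of the form

       x = {v} {CLOBBER};
       y = {v} {CLOBBER};
       resx N;    <-- throws externally

   keeps its clobbers alive for no reason, since the exception leaves
   the function anyway; the loop above simply deletes them.  N is a
   placeholder region number.  */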
3540 
3541 /* Try to sink var = {v} {CLOBBER} stmts followed just by
3542    internal throw to successor BB.  */
3543 
3544 static int
3545 sink_clobbers (basic_block bb)
3546 {
3547   edge e;
3548   edge_iterator ei;
3549   gimple_stmt_iterator gsi, dgsi;
3550   basic_block succbb;
3551   bool any_clobbers = false;
3552   unsigned todo = 0;
3553 
3554   /* Only optimize if BB has a single EH successor and
3555      all predecessor edges are EH too.  */
3556   if (!single_succ_p (bb)
3557       || (single_succ_edge (bb)->flags & EDGE_EH) == 0)
3558     return 0;
3559 
3560   FOR_EACH_EDGE (e, ei, bb->preds)
3561     {
3562       if ((e->flags & EDGE_EH) == 0)
3563 	return 0;
3564     }
3565 
3566   /* And BB contains only CLOBBER stmts before the final
3567      RESX.  */
3568   gsi = gsi_last_bb (bb);
3569   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3570     {
3571       gimple *stmt = gsi_stmt (gsi);
3572       if (is_gimple_debug (stmt))
3573 	continue;
3574       if (gimple_code (stmt) == GIMPLE_LABEL)
3575 	break;
3576       if (!gimple_clobber_p (stmt))
3577 	return 0;
3578       any_clobbers = true;
3579     }
3580   if (!any_clobbers)
3581     return 0;
3582 
3583   edge succe = single_succ_edge (bb);
3584   succbb = succe->dest;
3585 
3586   /* See if there is a virtual PHI node to take an updated virtual
3587      operand from.  */
3588   gphi *vphi = NULL;
3589   tree vuse = NULL_TREE;
3590   for (gphi_iterator gpi = gsi_start_phis (succbb);
3591        !gsi_end_p (gpi); gsi_next (&gpi))
3592     {
3593       tree res = gimple_phi_result (gpi.phi ());
3594       if (virtual_operand_p (res))
3595 	{
3596 	  vphi = gpi.phi ();
3597 	  vuse = res;
3598 	  break;
3599 	}
3600     }
3601 
3602   dgsi = gsi_after_labels (succbb);
3603   gsi = gsi_last_bb (bb);
3604   for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
3605     {
3606       gimple *stmt = gsi_stmt (gsi);
3607       tree lhs;
3608       if (is_gimple_debug (stmt))
3609 	continue;
3610       if (gimple_code (stmt) == GIMPLE_LABEL)
3611 	break;
3612       lhs = gimple_assign_lhs (stmt);
3613       /* Unfortunately we don't have dominance info updated at this
3614 	 point, so checking if
3615 	 dominated_by_p (CDI_DOMINATORS, succbb,
3616 			 gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))))
3617 	 would be too costly.  Thus, avoid sinking any clobbers that
3618 	 refer to non-(D) SSA_NAMEs.  */
3619       if (TREE_CODE (lhs) == MEM_REF
3620 	  && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME
3621 	  && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0)))
3622 	{
3623 	  unlink_stmt_vdef (stmt);
3624 	  gsi_remove (&gsi, true);
3625 	  release_defs (stmt);
3626 	  continue;
3627 	}
3628 
3629       /* As we do not change stmt order when sinking across a
3630          forwarder edge we can keep virtual operands in place.  */
3631       gsi_remove (&gsi, false);
3632       gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT);
3633 
3634       /* But adjust virtual operands if we sunk across a PHI node.  */
3635       if (vuse)
3636 	{
3637 	  gimple *use_stmt;
3638 	  imm_use_iterator iter;
3639 	  use_operand_p use_p;
3640 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, vuse)
3641 	    FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3642 	      SET_USE (use_p, gimple_vdef (stmt));
3643 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse))
3644 	    {
3645 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (stmt)) = 1;
3646 	      SSA_NAME_OCCURS_IN_ABNORMAL_PHI (vuse) = 0;
3647 	    }
3648 	  /* Adjust the incoming virtual operand.  */
3649 	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), gimple_vuse (stmt));
3650 	  SET_USE (gimple_vuse_op (stmt), vuse);
3651 	}
3652       /* If there is no virtual PHI node but SUCCBB has more than one
3653          predecessor, arrange for virtual operands to be renamed.  */
3654       else if (gimple_vuse_op (stmt) != NULL_USE_OPERAND_P
3655 	       && !single_pred_p (succbb))
3656 	{
3657 	  /* In this case there will be no use of the VDEF of this stmt.
3658 	     ???  Unless this is a secondary opportunity and we have not
3659 	     removed unreachable blocks yet, so we cannot assert this.
3660 	     Which also means we will end up renaming too many times.  */
3661 	  SET_USE (gimple_vuse_op (stmt), gimple_vop (cfun));
3662 	  mark_virtual_operands_for_renaming (cfun);
3663 	  todo |= TODO_update_ssa_only_virtuals;
3664 	}
3665     }
3666 
3667   return todo;
3668 }
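
/* For illustration only, a sketch under the checks above: in a block

       x = {v} {CLOBBER};
       resx N;    <-- transfers to a landing pad in this function

   the clobbers are moved to the start of the successor landing pad
   block, so that the block ends up containing only the RESX (plus any
   labels and debug stmts) and can later be treated as empty by
   cleanup_empty_eh.  N is a placeholder region number.  */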
3669 
3670 /* At the end of inlining, we can lower EH_DISPATCH.  Return true when
3671    we have found some duplicate labels and removed some edges.  */
3672 
3673 static bool
3674 lower_eh_dispatch (basic_block src, geh_dispatch *stmt)
3675 {
3676   gimple_stmt_iterator gsi;
3677   int region_nr;
3678   eh_region r;
3679   tree filter, fn;
3680   gimple *x;
3681   bool redirected = false;
3682 
3683   region_nr = gimple_eh_dispatch_region (stmt);
3684   r = get_eh_region_from_number (region_nr);
3685 
3686   gsi = gsi_last_bb (src);
3687 
3688   switch (r->type)
3689     {
3690     case ERT_TRY:
3691       {
3692 	auto_vec<tree> labels;
3693 	tree default_label = NULL;
3694 	eh_catch c;
3695 	edge_iterator ei;
3696 	edge e;
3697 	hash_set<tree> seen_values;
3698 
3699 	/* Collect the labels for a switch.  Zero each handler's label
3700 	   field because we'll no longer have anything keeping these labels
3701 	   in existence and the optimizer will be free to merge these
3702 	   blocks at will.  */
3703 	for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
3704 	  {
3705 	    tree tp_node, flt_node, lab = c->label;
3706 	    bool have_label = false;
3707 
3708 	    c->label = NULL;
3709 	    tp_node = c->type_list;
3710 	    flt_node = c->filter_list;
3711 
3712 	    if (tp_node == NULL)
3713 	      {
3714 	        default_label = lab;
3715 		break;
3716 	      }
3717 	    do
3718 	      {
3719 		/* Filter out duplicate labels that arise when this handler
3720 		   is shadowed by an earlier one.  When no labels are
3721 		   attached to the handler anymore, we remove
3722 		   the corresponding edge and then we delete unreachable
3723 		   blocks at the end of this pass.  */
3724 		if (! seen_values.contains (TREE_VALUE (flt_node)))
3725 		  {
3726 		    tree t = build_case_label (TREE_VALUE (flt_node),
3727 					       NULL, lab);
3728 		    labels.safe_push (t);
3729 		    seen_values.add (TREE_VALUE (flt_node));
3730 		    have_label = true;
3731 		  }
3732 
3733 		tp_node = TREE_CHAIN (tp_node);
3734 		flt_node = TREE_CHAIN (flt_node);
3735 	      }
3736 	    while (tp_node);
3737 	    if (! have_label)
3738 	      {
3739 	        remove_edge (find_edge (src, label_to_block (lab)));
3740 	        redirected = true;
3741 	      }
3742 	  }
3743 
3744 	/* Clean up the edge flags.  */
3745 	FOR_EACH_EDGE (e, ei, src->succs)
3746 	  {
3747 	    if (e->flags & EDGE_FALLTHRU)
3748 	      {
3749 		/* If there was no catch-all, use the fallthru edge.  */
3750 		if (default_label == NULL)
3751 		  default_label = gimple_block_label (e->dest);
3752 		e->flags &= ~EDGE_FALLTHRU;
3753 	      }
3754 	  }
3755 	gcc_assert (default_label != NULL);
3756 
3757 	/* Don't generate a switch if there's only a default case.
3758 	   This is common in the form of try { A; } catch (...) { B; }.  */
3759 	if (!labels.exists ())
3760 	  {
3761 	    e = single_succ_edge (src);
3762 	    e->flags |= EDGE_FALLTHRU;
3763 	  }
3764 	else
3765 	  {
3766 	    fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3767 	    x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3768 							 region_nr));
3769 	    filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3770 	    filter = make_ssa_name (filter, x);
3771 	    gimple_call_set_lhs (x, filter);
3772 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3773 
3774 	    /* Turn the default label into a default case.  */
3775 	    default_label = build_case_label (NULL, NULL, default_label);
3776 	    sort_case_labels (labels);
3777 
3778 	    x = gimple_build_switch (filter, default_label, labels);
3779 	    gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3780 	  }
3781       }
3782       break;
3783 
3784     case ERT_ALLOWED_EXCEPTIONS:
3785       {
3786 	edge b_e = BRANCH_EDGE (src);
3787 	edge f_e = FALLTHRU_EDGE (src);
3788 
3789 	fn = builtin_decl_implicit (BUILT_IN_EH_FILTER);
3790 	x = gimple_build_call (fn, 1, build_int_cst (integer_type_node,
3791 						     region_nr));
3792 	filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn)));
3793 	filter = make_ssa_name (filter, x);
3794 	gimple_call_set_lhs (x, filter);
3795 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3796 
3797 	r->u.allowed.label = NULL;
3798 	x = gimple_build_cond (EQ_EXPR, filter,
3799 			       build_int_cst (TREE_TYPE (filter),
3800 					      r->u.allowed.filter),
3801 			       NULL_TREE, NULL_TREE);
3802 	gsi_insert_before (&gsi, x, GSI_SAME_STMT);
3803 
3804 	b_e->flags = b_e->flags | EDGE_TRUE_VALUE;
3805         f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
3806       }
3807       break;
3808 
3809     default:
3810       gcc_unreachable ();
3811     }
3812 
3813   /* Replace the EH_DISPATCH with the SWITCH or COND generated above.  */
3814   gsi_remove (&gsi, true);
3815   return redirected;
3816 }
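
/* For illustration only: for an ERT_TRY region such as

       try { ... } catch (A) { ... } catch (B) { ... }

   the EH_DISPATCH above is replaced roughly by

       _f = __builtin_eh_filter (REGION_NR);
       switch (_f) <case FA: goto A-handler,
		    case FB: goto B-handler,
		    default: goto fallthru>

   where FA and FB stand for the filter values chosen earlier by
   assign_filter_values (placeholders here).  A lone catch (...) needs
   no switch at all, and an ERT_ALLOWED_EXCEPTIONS region instead gets
   a two-way GIMPLE_COND on the same __builtin_eh_filter result.  */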
3817 
3818 namespace {
3819 
3820 const pass_data pass_data_lower_eh_dispatch =
3821 {
3822   GIMPLE_PASS, /* type */
3823   "ehdisp", /* name */
3824   OPTGROUP_NONE, /* optinfo_flags */
3825   TV_TREE_EH, /* tv_id */
3826   PROP_gimple_lcf, /* properties_required */
3827   0, /* properties_provided */
3828   0, /* properties_destroyed */
3829   0, /* todo_flags_start */
3830   0, /* todo_flags_finish */
3831 };
3832 
3833 class pass_lower_eh_dispatch : public gimple_opt_pass
3834 {
3835 public:
3836   pass_lower_eh_dispatch (gcc::context *ctxt)
3837     : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt)
3838   {}
3839 
3840   /* opt_pass methods: */
3841   virtual bool gate (function *fun) { return fun->eh->region_tree != NULL; }
3842   virtual unsigned int execute (function *);
3843 
3844 }; // class pass_lower_eh_dispatch
3845 
3846 unsigned
3847 pass_lower_eh_dispatch::execute (function *fun)
3848 {
3849   basic_block bb;
3850   int flags = 0;
3851   bool redirected = false;
3852 
3853   assign_filter_values ();
3854 
3855   FOR_EACH_BB_FN (bb, fun)
3856     {
3857       gimple *last = last_stmt (bb);
3858       if (last == NULL)
3859 	continue;
3860       if (gimple_code (last) == GIMPLE_EH_DISPATCH)
3861 	{
3862 	  redirected |= lower_eh_dispatch (bb,
3863 					   as_a <geh_dispatch *> (last));
3864 	  flags |= TODO_update_ssa_only_virtuals;
3865 	}
3866       else if (gimple_code (last) == GIMPLE_RESX)
3867 	{
3868 	  if (stmt_can_throw_external (last))
3869 	    optimize_clobbers (bb);
3870 	  else
3871 	    flags |= sink_clobbers (bb);
3872 	}
3873     }
3874 
3875   if (redirected)
3876     delete_unreachable_blocks ();
3877   return flags;
3878 }
3879 
3880 } // anon namespace
3881 
3882 gimple_opt_pass *
3883 make_pass_lower_eh_dispatch (gcc::context *ctxt)
3884 {
3885   return new pass_lower_eh_dispatch (ctxt);
3886 }
3887 
3888 /* Walk statements, see what regions and, optionally, landing pads
3889    are really referenced.
3890 
3891    Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
3892    and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.
3893 
3894    Passing NULL for LP_REACHABLEP is valid; in this case only reachable
3895    regions are marked.
3896 
3897    The caller is responsible for freeing the returned sbitmaps.  */
3898 
3899 static void
3900 mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep)
3901 {
3902   sbitmap r_reachable, lp_reachable;
3903   basic_block bb;
3904   bool mark_landing_pads = (lp_reachablep != NULL);
3905   gcc_checking_assert (r_reachablep != NULL);
3906 
3907   r_reachable = sbitmap_alloc (cfun->eh->region_array->length ());
3908   bitmap_clear (r_reachable);
3909   *r_reachablep = r_reachable;
3910 
3911   if (mark_landing_pads)
3912     {
3913       lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ());
3914       bitmap_clear (lp_reachable);
3915       *lp_reachablep = lp_reachable;
3916     }
3917   else
3918     lp_reachable = NULL;
3919 
3920   FOR_EACH_BB_FN (bb, cfun)
3921     {
3922       gimple_stmt_iterator gsi;
3923 
3924       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3925 	{
3926 	  gimple *stmt = gsi_stmt (gsi);
3927 
3928 	  if (mark_landing_pads)
3929 	    {
3930 	      int lp_nr = lookup_stmt_eh_lp (stmt);
3931 
3932 	      /* Negative LP numbers are MUST_NOT_THROW regions which
3933 		 are not considered BB enders.  */
3934 	      if (lp_nr < 0)
3935 		bitmap_set_bit (r_reachable, -lp_nr);
3936 
3937 	      /* Positive LP numbers are real landing pads, and BB enders.  */
3938 	      else if (lp_nr > 0)
3939 		{
3940 		  gcc_assert (gsi_one_before_end_p (gsi));
3941 		  eh_region region = get_eh_region_from_lp_number (lp_nr);
3942 		  bitmap_set_bit (r_reachable, region->index);
3943 		  bitmap_set_bit (lp_reachable, lp_nr);
3944 		}
3945 	    }
3946 
3947 	  /* Avoid removing regions referenced from RESX/EH_DISPATCH.  */
3948 	  switch (gimple_code (stmt))
3949 	    {
3950 	    case GIMPLE_RESX:
3951 	      bitmap_set_bit (r_reachable,
3952 			      gimple_resx_region (as_a <gresx *> (stmt)));
3953 	      break;
3954 	    case GIMPLE_EH_DISPATCH:
3955 	      bitmap_set_bit (r_reachable,
3956 			      gimple_eh_dispatch_region (
3957                                 as_a <geh_dispatch *> (stmt)));
3958 	      break;
3959 	    case GIMPLE_CALL:
3960 	      if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES))
3961 		for (int i = 0; i < 2; ++i)
3962 		  {
3963 		    tree rt = gimple_call_arg (stmt, i);
3964 		    HOST_WIDE_INT ri = tree_to_shwi (rt);
3965 
3966 		    gcc_assert (ri == (int)ri);
3967 		    bitmap_set_bit (r_reachable, ri);
3968 		  }
3969 	      break;
3970 	    default:
3971 	      break;
3972 	    }
3973 	}
3974     }
3975 }
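
/* For illustration only, the typical calling pattern for the helper
   above (a sketch of what remove_unreachable_handlers below does):

       sbitmap r_reachable, lp_reachable;
       mark_reachable_handlers (&r_reachable, &lp_reachable);
       ... test bitmap_bit_p (r_reachable, region->index) ...
       sbitmap_free (r_reachable);
       sbitmap_free (lp_reachable);  */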
3976 
3977 /* Remove unreachable handlers and unreachable landing pads.  */
3978 
3979 static void
3980 remove_unreachable_handlers (void)
3981 {
3982   sbitmap r_reachable, lp_reachable;
3983   eh_region region;
3984   eh_landing_pad lp;
3985   unsigned i;
3986 
3987   mark_reachable_handlers (&r_reachable, &lp_reachable);
3988 
3989   if (dump_file)
3990     {
3991       fprintf (dump_file, "Before removal of unreachable regions:\n");
3992       dump_eh_tree (dump_file, cfun);
3993       fprintf (dump_file, "Reachable regions: ");
3994       dump_bitmap_file (dump_file, r_reachable);
3995       fprintf (dump_file, "Reachable landing pads: ");
3996       dump_bitmap_file (dump_file, lp_reachable);
3997     }
3998 
3999   if (dump_file)
4000     {
4001       FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4002 	if (region && !bitmap_bit_p (r_reachable, region->index))
4003 	  fprintf (dump_file,
4004 		   "Removing unreachable region %d\n",
4005 		   region->index);
4006     }
4007 
4008   remove_unreachable_eh_regions (r_reachable);
4009 
4010   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4011     if (lp && !bitmap_bit_p (lp_reachable, lp->index))
4012       {
4013 	if (dump_file)
4014 	  fprintf (dump_file,
4015 		   "Removing unreachable landing pad %d\n",
4016 		   lp->index);
4017 	remove_eh_landing_pad (lp);
4018       }
4019 
4020   if (dump_file)
4021     {
4022       fprintf (dump_file, "\n\nAfter removal of unreachable regions:\n");
4023       dump_eh_tree (dump_file, cfun);
4024       fprintf (dump_file, "\n\n");
4025     }
4026 
4027   sbitmap_free (r_reachable);
4028   sbitmap_free (lp_reachable);
4029 
4030   if (flag_checking)
4031     verify_eh_tree (cfun);
4032 }
4033 
4034 /* Remove unreachable handlers if any landing pads have been removed after
4035    the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
4036 
4037 void
4038 maybe_remove_unreachable_handlers (void)
4039 {
4040   eh_landing_pad lp;
4041   unsigned i;
4042 
4043   if (cfun->eh == NULL)
4044     return;
4045 
4046   FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp)
4047     if (lp && lp->post_landing_pad)
4048       {
4049 	if (label_to_block (lp->post_landing_pad) == NULL)
4050 	  {
4051 	    remove_unreachable_handlers ();
4052 	    return;
4053 	  }
4054       }
4055 }
4056 
4057 /* Remove regions that do not have landing pads.  This assumes
4058    that remove_unreachable_handlers has already been run, and
4059    that we've just manipulated the landing pads since then.
4060 
4061    Preserve regions with landing pads and regions that prevent
4062    exceptions from propagating further, even if these regions
4063    are not reachable.  */
4064 
4065 static void
4066 remove_unreachable_handlers_no_lp (void)
4067 {
4068   eh_region region;
4069   sbitmap r_reachable;
4070   unsigned i;
4071 
4072   mark_reachable_handlers (&r_reachable, /*lp_reachablep=*/NULL);
4073 
4074   FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region)
4075     {
4076       if (! region)
4077 	continue;
4078 
4079       if (region->landing_pads != NULL
4080 	  || region->type == ERT_MUST_NOT_THROW)
4081 	bitmap_set_bit (r_reachable, region->index);
4082 
4083       if (dump_file
4084 	  && !bitmap_bit_p (r_reachable, region->index))
4085 	fprintf (dump_file,
4086 		 "Removing unreachable region %d\n",
4087 		 region->index);
4088     }
4089 
4090   remove_unreachable_eh_regions (r_reachable);
4091 
4092   sbitmap_free (r_reachable);
4093 }
4094 
4095 /* Undo critical edge splitting on an EH landing pad.  Earlier, we
4096    optimistically split all sorts of edges, including EH edges.  The
4097    optimization passes in between may not have needed them; if not,
4098    we should undo the split.
4099 
4100    Recognize this case by having one EH edge incoming to the BB and
4101    one normal edge outgoing; BB should be empty apart from the
4102    post_landing_pad label.
4103 
4104    Note that this is slightly different from the empty handler case
4105    handled by cleanup_empty_eh, in that the handler itself may yet
4106    contain actual code, but the landing pad has been separated from the
4107    handler.  As such, cleanup_empty_eh relies on this transformation
4108    having been done first.  */
4109 
4110 static bool
4111 unsplit_eh (eh_landing_pad lp)
4112 {
4113   basic_block bb = label_to_block (lp->post_landing_pad);
4114   gimple_stmt_iterator gsi;
4115   edge e_in, e_out;
4116 
4117   /* Quickly check the edge counts on BB for singularity.  */
4118   if (!single_pred_p (bb) || !single_succ_p (bb))
4119     return false;
4120   e_in = single_pred_edge (bb);
4121   e_out = single_succ_edge (bb);
4122 
4123   /* Input edge must be EH and output edge must be normal.  */
4124   if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0)
4125     return false;
4126 
4127   /* The block must be empty except for the labels and debug insns.  */
4128   gsi = gsi_after_labels (bb);
4129   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4130     gsi_next_nondebug (&gsi);
4131   if (!gsi_end_p (gsi))
4132     return false;
4133 
4134   /* The destination block must not already have a landing pad
4135      for a different region.  */
4136   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4137     {
4138       glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4139       tree lab;
4140       int lp_nr;
4141 
4142       if (!label_stmt)
4143 	break;
4144       lab = gimple_label_label (label_stmt);
4145       lp_nr = EH_LANDING_PAD_NR (lab);
4146       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4147 	return false;
4148     }
4149 
4150   /* The new destination block must not already be a destination of
4151      the source block, lest we merge fallthru and EH edges and cause
4152      all sorts of confusion.  */
4153   if (find_edge (e_in->src, e_out->dest))
4154     return false;
4155 
4156   /* ??? We can get degenerate phis due to cfg cleanups.  I would have
4157      thought this should have been cleaned up by a phicprop pass, but
4158      that doesn't appear to handle virtuals.  Propagate by hand.  */
4159   if (!gimple_seq_empty_p (phi_nodes (bb)))
4160     {
4161       for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); )
4162 	{
4163 	  gimple *use_stmt;
4164 	  gphi *phi = gpi.phi ();
4165 	  tree lhs = gimple_phi_result (phi);
4166 	  tree rhs = gimple_phi_arg_def (phi, 0);
4167 	  use_operand_p use_p;
4168 	  imm_use_iterator iter;
4169 
4170 	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
4171 	    {
4172 	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
4173 		SET_USE (use_p, rhs);
4174 	    }
4175 
4176 	  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
4177 	    SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
4178 
4179 	  remove_phi_node (&gpi, true);
4180 	}
4181     }
4182 
4183   if (dump_file && (dump_flags & TDF_DETAILS))
4184     fprintf (dump_file, "Unsplit EH landing pad %d to block %i.\n",
4185 	     lp->index, e_out->dest->index);
4186 
4187   /* Redirect the edge.  Since redirect_eh_edge_1 expects to be moving
4188      a successor edge, humor it.  But do the real CFG change with the
4189      predecessor of E_OUT in order to preserve the ordering of arguments
4190      to the PHI nodes in E_OUT->DEST.  */
4191   redirect_eh_edge_1 (e_in, e_out->dest, false);
4192   redirect_edge_pred (e_out, e_in->src);
4193   e_out->flags = e_in->flags;
4194   e_out->probability = e_in->probability;
4195   e_out->count = e_in->count;
4196   remove_edge (e_in);
4197 
4198   return true;
4199 }
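
/* For illustration only: the transformation above turns

       throw stmt --EH--> [post_landing_pad label only] --fallthru--> handler

   back into

       throw stmt --EH--> handler

   i.e. it undoes the earlier critical-edge split when no optimization
   ever placed anything in the split block.  */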
4200 
4201 /* Examine each landing pad block and see if it matches unsplit_eh.  */
4202 
4203 static bool
4204 unsplit_all_eh (void)
4205 {
4206   bool changed = false;
4207   eh_landing_pad lp;
4208   int i;
4209 
4210   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4211     if (lp)
4212       changed |= unsplit_eh (lp);
4213 
4214   return changed;
4215 }
4216 
4217 /* A subroutine of cleanup_empty_eh.  Redirect all EH edges incoming
4218    to OLD_BB to NEW_BB; return true on success, false on failure.
4219 
4220    OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any
4221    PHI variables from OLD_BB we can pick them up from OLD_BB_OUT.
4222    Virtual PHIs may be deleted and marked for renaming.  */
4223 
4224 static bool
4225 cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb,
4226 			     edge old_bb_out, bool change_region)
4227 {
4228   gphi_iterator ngsi, ogsi;
4229   edge_iterator ei;
4230   edge e;
4231   bitmap ophi_handled;
4232 
4233   /* The destination block must not be a regular successor for any
4234      of the preds of the landing pad.  Thus, avoid turning
4235         <..>
4236 	 |  \ EH
4237 	 |  <..>
4238 	 |  /
4239 	<..>
4240      into
4241         <..>
4242 	|  | EH
4243 	<..>
4244      which CFG verification would choke on.  See PR45172 and PR51089.  */
4245   FOR_EACH_EDGE (e, ei, old_bb->preds)
4246     if (find_edge (e->src, new_bb))
4247       return false;
4248 
4249   FOR_EACH_EDGE (e, ei, old_bb->preds)
4250     redirect_edge_var_map_clear (e);
4251 
4252   ophi_handled = BITMAP_ALLOC (NULL);
4253 
4254   /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map
4255      for the edges we're going to move.  */
4256   for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (ngsi); gsi_next (&ngsi))
4257     {
4258       gphi *ophi, *nphi = ngsi.phi ();
4259       tree nresult, nop;
4260 
4261       nresult = gimple_phi_result (nphi);
4262       nop = gimple_phi_arg_def (nphi, old_bb_out->dest_idx);
4263 
4264       /* Find the corresponding PHI in OLD_BB so we can forward-propagate
4265 	 the source ssa_name.  */
4266       ophi = NULL;
4267       for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4268 	{
4269 	  ophi = ogsi.phi ();
4270 	  if (gimple_phi_result (ophi) == nop)
4271 	    break;
4272 	  ophi = NULL;
4273 	}
4274 
4275       /* If we did find the corresponding PHI, copy those inputs.  */
4276       if (ophi)
4277 	{
4278 	  /* If NOP is used somewhere else beyond phis in new_bb, give up.  */
4279 	  if (!has_single_use (nop))
4280 	    {
4281 	      imm_use_iterator imm_iter;
4282 	      use_operand_p use_p;
4283 
4284 	      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop)
4285 		{
4286 		  if (!gimple_debug_bind_p (USE_STMT (use_p))
4287 		      && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI
4288 			  || gimple_bb (USE_STMT (use_p)) != new_bb))
4289 		    goto fail;
4290 		}
4291 	    }
4292 	  bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop));
4293 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4294 	    {
4295 	      location_t oloc;
4296 	      tree oop;
4297 
4298 	      if ((e->flags & EDGE_EH) == 0)
4299 		continue;
4300 	      oop = gimple_phi_arg_def (ophi, e->dest_idx);
4301 	      oloc = gimple_phi_arg_location (ophi, e->dest_idx);
4302 	      redirect_edge_var_map_add (e, nresult, oop, oloc);
4303 	    }
4304 	}
4305       /* If we didn't find the PHI, if it's a real variable or a VOP, we know
4306 	 from the fact that OLD_BB is tree_empty_eh_handler_p that the
4307 	 variable is unchanged from input to the block and we can simply
4308 	 re-use the input to NEW_BB from the OLD_BB_OUT edge.  */
4309       else
4310 	{
4311 	  location_t nloc
4312 	    = gimple_phi_arg_location (nphi, old_bb_out->dest_idx);
4313 	  FOR_EACH_EDGE (e, ei, old_bb->preds)
4314 	    redirect_edge_var_map_add (e, nresult, nop, nloc);
4315 	}
4316     }
4317 
4318   /* Second, verify that all PHIs from OLD_BB have been handled.  If not,
4319      we don't know what values from the other edges into NEW_BB to use.  */
4320   for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (ogsi); gsi_next (&ogsi))
4321     {
4322       gphi *ophi = ogsi.phi ();
4323       tree oresult = gimple_phi_result (ophi);
4324       if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult)))
4325 	goto fail;
4326     }
4327 
4328   /* Finally, move the edges and update the PHIs.  */
4329   for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (ei)); )
4330     if (e->flags & EDGE_EH)
4331       {
4332 	/* ???  CFG manipulation routines do not try to update loop
4333 	   form on edge redirection.  Do so manually here for now.  */
4334 	/* If we redirect a loop entry or latch edge, that will either create
4335 	   a multiple-entry loop or rotate the loop.  If the loops merge,
4336 	   we may have created a loop with multiple latches.
4337 	   None of this is easily fixed, thus cancel the affected loop
4338 	   and mark the other loop as possibly having multiple latches.  */
4339 	if (e->dest == e->dest->loop_father->header)
4340 	  {
4341 	    mark_loop_for_removal (e->dest->loop_father);
4342 	    new_bb->loop_father->latch = NULL;
4343 	    loops_state_set (LOOPS_MAY_HAVE_MULTIPLE_LATCHES);
4344 	  }
4345 	redirect_eh_edge_1 (e, new_bb, change_region);
4346 	redirect_edge_succ (e, new_bb);
4347 	flush_pending_stmts (e);
4348       }
4349     else
4350       ei_next (&ei);
4351 
4352   BITMAP_FREE (ophi_handled);
4353   return true;
4354 
4355  fail:
4356   FOR_EACH_EDGE (e, ei, old_bb->preds)
4357     redirect_edge_var_map_clear (e);
4358   BITMAP_FREE (ophi_handled);
4359   return false;
4360 }
4361 
4362 /* A subroutine of cleanup_empty_eh.  Move a landing pad LP from its
4363    old region to NEW_REGION at BB.  */
4364 
4365 static void
4366 cleanup_empty_eh_move_lp (basic_block bb, edge e_out,
4367 			  eh_landing_pad lp, eh_region new_region)
4368 {
4369   gimple_stmt_iterator gsi;
4370   eh_landing_pad *pp;
4371 
4372   for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp)
4373     continue;
4374   *pp = lp->next_lp;
4375 
4376   lp->region = new_region;
4377   lp->next_lp = new_region->landing_pads;
4378   new_region->landing_pads = lp;
4379 
4380   /* Delete the RESX that was matched within the empty handler block.  */
4381   gsi = gsi_last_bb (bb);
4382   unlink_stmt_vdef (gsi_stmt (gsi));
4383   gsi_remove (&gsi, true);
4384 
4385   /* Clean up E_OUT for the fallthru.  */
4386   e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU;
4387   e_out->probability = REG_BR_PROB_BASE;
4388   e_out->count = e_out->src->count;
4389 }
4390 
4391 /* A subroutine of cleanup_empty_eh.  Handle more complex cases of
4392    unsplitting than unsplit_eh was prepared to handle, e.g. when
4393    multiple incoming edges and phis are involved.  */
4394 
4395 static bool
4396 cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp)
4397 {
4398   gimple_stmt_iterator gsi;
4399   tree lab;
4400 
4401   /* We really ought not have totally lost everything following
4402      a landing pad label.  Given that BB is empty, there had better
4403      be a successor.  */
4404   gcc_assert (e_out != NULL);
4405 
4406   /* The destination block must not already have a landing pad
4407      for a different region.  */
4408   lab = NULL;
4409   for (gsi = gsi_start_bb (e_out->dest); !gsi_end_p (gsi); gsi_next (&gsi))
4410     {
4411       glabel *stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
4412       int lp_nr;
4413 
4414       if (!stmt)
4415 	break;
4416       lab = gimple_label_label (stmt);
4417       lp_nr = EH_LANDING_PAD_NR (lab);
4418       if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region)
4419 	return false;
4420     }
4421 
4422   /* Attempt to move the PHIs into the successor block.  */
4423   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, false))
4424     {
4425       if (dump_file && (dump_flags & TDF_DETAILS))
4426 	fprintf (dump_file,
4427 		 "Unsplit EH landing pad %d to block %i "
4428 		 "(via cleanup_empty_eh).\n",
4429 		 lp->index, e_out->dest->index);
4430       return true;
4431     }
4432 
4433   return false;
4434 }
4435 
4436 /* Return true if edge E_FIRST is part of an empty infinite loop
4437    or leads to such a loop through a series of single successor
4438    empty bbs.  */
4439 
4440 static bool
4441 infinite_empty_loop_p (edge e_first)
4442 {
4443   bool inf_loop = false;
4444   edge e;
4445 
4446   if (e_first->dest == e_first->src)
4447     return true;
4448 
4449   e_first->src->aux = (void *) 1;
4450   for (e = e_first; single_succ_p (e->dest); e = single_succ_edge (e->dest))
4451     {
4452       gimple_stmt_iterator gsi;
4453       if (e->dest->aux)
4454 	{
4455 	  inf_loop = true;
4456 	  break;
4457 	}
4458       e->dest->aux = (void *) 1;
4459       gsi = gsi_after_labels (e->dest);
4460       if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4461 	gsi_next_nondebug (&gsi);
4462       if (!gsi_end_p (gsi))
4463 	break;
4464     }
4465   e_first->src->aux = NULL;
4466   for (e = e_first; e->dest->aux; e = single_succ_edge (e->dest))
4467     e->dest->aux = NULL;
4468 
4469   return inf_loop;
4470 }
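
/* For illustration only: the walk above flags degenerate shapes like a
   block branching to itself, or a chain of empty single-successor
   blocks ending in such a cycle.  Redirecting EH edges into one of
   these would achieve nothing, so cleanup_empty_eh bails out on them
   below.  */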
4471 
4472 /* Examine the block associated with LP to determine if it's an empty
4473    handler for its EH region.  If so, attempt to redirect EH edges to
4474    an outer region.  Return true if the CFG was updated in any way.  This
4475    is similar to jump forwarding, just across EH edges.  */
4476 
4477 static bool
4478 cleanup_empty_eh (eh_landing_pad lp)
4479 {
4480   basic_block bb = label_to_block (lp->post_landing_pad);
4481   gimple_stmt_iterator gsi;
4482   gimple *resx;
4483   eh_region new_region;
4484   edge_iterator ei;
4485   edge e, e_out;
4486   bool has_non_eh_pred;
4487   bool ret = false;
4488   int new_lp_nr;
4489 
4490   /* There can be zero or one edges out of BB.  This is the quickest test.  */
4491   switch (EDGE_COUNT (bb->succs))
4492     {
4493     case 0:
4494       e_out = NULL;
4495       break;
4496     case 1:
4497       e_out = single_succ_edge (bb);
4498       break;
4499     default:
4500       return false;
4501     }
4502 
4503   gsi = gsi_last_nondebug_bb (bb);
4504   resx = gsi_stmt (gsi);
4505   if (resx && is_gimple_resx (resx))
4506     {
4507       if (stmt_can_throw_external (resx))
4508 	optimize_clobbers (bb);
4509       else if (sink_clobbers (bb))
4510 	ret = true;
4511     }
4512 
4513   gsi = gsi_after_labels (bb);
4514 
4515   /* Make sure to skip debug statements.  */
4516   if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
4517     gsi_next_nondebug (&gsi);
4518 
4519   /* If the block is totally empty, look for more unsplitting cases.  */
4520   if (gsi_end_p (gsi))
4521     {
4522       /* For the degenerate case of an infinite loop bail out.
4523 	 If BB has no successors and is totally empty, which can happen e.g.
4524 	 because of an incorrect noreturn attribute, bail out too.  */
4525       if (e_out == NULL
4526 	  || infinite_empty_loop_p (e_out))
4527 	return ret;
4528 
4529       return ret | cleanup_empty_eh_unsplit (bb, e_out, lp);
4530     }
4531 
4532   /* The block should consist only of a single RESX statement, modulo a
4533      preceding call to __builtin_stack_restore if there is no outgoing
4534      edge, since the call can be eliminated in this case.  */
4535   resx = gsi_stmt (gsi);
4536   if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE))
4537     {
4538       gsi_next_nondebug (&gsi);
4539       resx = gsi_stmt (gsi);
4540     }
4541   if (!is_gimple_resx (resx))
4542     return ret;
4543   gcc_assert (gsi_one_nondebug_before_end_p (gsi));
4544 
4545   /* Determine if there are non-EH edges, or resx edges into the handler.  */
4546   has_non_eh_pred = false;
4547   FOR_EACH_EDGE (e, ei, bb->preds)
4548     if (!(e->flags & EDGE_EH))
4549       has_non_eh_pred = true;
4550 
4551   /* Find the handler that's outer of the empty handler by looking at
4552      where the RESX instruction was vectored.  */
4553   new_lp_nr = lookup_stmt_eh_lp (resx);
4554   new_region = get_eh_region_from_lp_number (new_lp_nr);
4555 
4556   /* If there's no destination region within the current function,
4557      redirection is trivial via removing the throwing statements from
4558      the EH region, removing the EH edges, and allowing the block
4559      to go unreachable.  */
4560   if (new_region == NULL)
4561     {
4562       gcc_assert (e_out == NULL);
4563       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4564 	if (e->flags & EDGE_EH)
4565 	  {
4566 	    gimple *stmt = last_stmt (e->src);
4567 	    remove_stmt_from_eh_lp (stmt);
4568 	    remove_edge (e);
4569 	  }
4570 	else
4571 	  ei_next (&ei);
4572       goto succeed;
4573     }
4574 
4575   /* If the destination region is a MUST_NOT_THROW, allow the runtime
4576      to handle the abort and allow the blocks to go unreachable.  */
4577   if (new_region->type == ERT_MUST_NOT_THROW)
4578     {
4579       for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
4580 	if (e->flags & EDGE_EH)
4581 	  {
4582 	    gimple *stmt = last_stmt (e->src);
4583 	    remove_stmt_from_eh_lp (stmt);
4584 	    add_stmt_to_eh_lp (stmt, new_lp_nr);
4585 	    remove_edge (e);
4586 	  }
4587 	else
4588 	  ei_next (&ei);
4589       goto succeed;
4590     }
4591 
4592   /* Try to redirect the EH edges and merge the PHIs into the destination
4593      landing pad block.  If the merge succeeds, we'll already have redirected
4594      all the EH edges.  The handler itself will go unreachable if there were
4595      no normal edges.  */
4596   if (cleanup_empty_eh_merge_phis (e_out->dest, bb, e_out, true))
4597     goto succeed;
4598 
4599   /* Finally, if all input edges are EH edges, then we can (potentially)
4600      reduce the number of transfers from the runtime by moving the landing
4601      pad from the original region to the new region.  This is a win when
4602      we remove the last CLEANUP region along a particular exception
4603      propagation path.  Since nothing changes except for the region with
4604      which the landing pad is associated, the PHI nodes do not need to be
4605      adjusted at all.  */
4606   if (!has_non_eh_pred)
4607     {
4608       cleanup_empty_eh_move_lp (bb, e_out, lp, new_region);
4609       if (dump_file && (dump_flags & TDF_DETAILS))
4610 	fprintf (dump_file, "Empty EH handler %i moved to EH region %i.\n",
4611 		 lp->index, new_region->index);
4612 
4613       /* ??? The CFG didn't change, but we may have rendered the
4614 	 old EH region unreachable.  Trigger a cleanup there.  */
4615       return true;
4616     }
4617 
4618   return ret;
4619 
4620  succeed:
4621   if (dump_file && (dump_flags & TDF_DETAILS))
4622     fprintf (dump_file, "Empty EH handler %i removed.\n", lp->index);
4623   remove_eh_landing_pad (lp);
4624   return true;
4625 }
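
/* For illustration only, the common case handled above: a cleanup
   whose handler body was optimized away entirely (e.g. after inlining
   an empty destructor) leaves just

       <post_landing_pad label>:
       resx N;    <-- transfers to the outer region

   so the incoming EH edges can be redirected straight to the outer
   landing pad, after which this pad is deleted.  N is a placeholder
   region number.  */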
4626 
4627 /* Do a post-order traversal of the EH region tree.  Examine each
4628    post_landing_pad block and see if we can eliminate it as empty.  */
4629 
4630 static bool
4631 cleanup_all_empty_eh (void)
4632 {
4633   bool changed = false;
4634   eh_landing_pad lp;
4635   int i;
4636 
4637   for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
4638     if (lp)
4639       changed |= cleanup_empty_eh (lp);
4640 
4641   return changed;
4642 }
4643 
4644 /* Perform cleanups and lowering of exception handling:
4645     1) cleanup regions with handlers doing nothing are optimized out
4646     2) MUST_NOT_THROW regions that became dead because of 1) are optimized out
4647     3) info about regions that contain instructions, and about regions
4648        reachable via local EH edges, is collected
4649     4) the EH tree is pruned of regions that are no longer necessary.
4650 
4651    TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
4652 	 Unify those that have the same failure decl and locus.
4653 */
4654 
4655 static unsigned int
4656 execute_cleanup_eh_1 (void)
4657 {
4658   /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
4659      looking up unreachable landing pads.  */
4660   remove_unreachable_handlers ();
4661 
4662   /* Watch out for the region tree vanishing due to all of it being
	 unreachable.  */
4663   if (cfun->eh->region_tree)
4664     {
4665       bool changed = false;
4666 
4667       if (optimize)
4668 	changed |= unsplit_all_eh ();
4669       changed |= cleanup_all_empty_eh ();
4670 
4671       if (changed)
4672 	{
4673 	  free_dominance_info (CDI_DOMINATORS);
4674 	  free_dominance_info (CDI_POST_DOMINATORS);
4675 
4676           /* We delayed all basic block deletion, as we may have performed
4677 	     cleanups on EH edges while non-EH edges were still present.  */
4678 	  delete_unreachable_blocks ();
4679 
4680 	  /* We manipulated the landing pads.  Remove any region that no
4681 	     longer has a landing pad.  */
4682 	  remove_unreachable_handlers_no_lp ();
4683 
4684 	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
4685 	}
4686     }
4687 
4688   return 0;
4689 }
4690 
4691 namespace {
4692 
4693 const pass_data pass_data_cleanup_eh =
4694 {
4695   GIMPLE_PASS, /* type */
4696   "ehcleanup", /* name */
4697   OPTGROUP_NONE, /* optinfo_flags */
4698   TV_TREE_EH, /* tv_id */
4699   PROP_gimple_lcf, /* properties_required */
4700   0, /* properties_provided */
4701   0, /* properties_destroyed */
4702   0, /* todo_flags_start */
4703   0, /* todo_flags_finish */
4704 };
4705 
4706 class pass_cleanup_eh : public gimple_opt_pass
4707 {
4708 public:
4709   pass_cleanup_eh (gcc::context *ctxt)
4710     : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
4711   {}
4712 
4713   /* opt_pass methods: */
4714   opt_pass * clone () { return new pass_cleanup_eh (m_ctxt); }
4715   virtual bool gate (function *fun)
4716     {
4717       return fun->eh != NULL && fun->eh->region_tree != NULL;
4718     }
4719 
4720   virtual unsigned int execute (function *);
4721 
4722 }; // class pass_cleanup_eh
4723 
4724 unsigned int
4725 pass_cleanup_eh::execute (function *fun)
4726 {
4727   int ret = execute_cleanup_eh_1 ();
4728 
4729   /* If the function no longer needs an EH personality routine,
4730      clear it.  This exposes cross-language inlining opportunities
4731      and avoids references to a never defined personality routine.  */
4732   if (DECL_FUNCTION_PERSONALITY (current_function_decl)
4733       && function_needs_eh_personality (fun) != eh_personality_lang)
4734     DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;
4735 
4736   return ret;
4737 }
4738 
4739 } // anon namespace
4740 
4741 gimple_opt_pass *
4742 make_pass_cleanup_eh (gcc::context *ctxt)
4743 {
4744   return new pass_cleanup_eh (ctxt);
4745 }
4746 
4747 /* Verify that the BB containing STMT as its last statement has precisely
4748    the edges that make_eh_edges would create.  */
4749 
4750 DEBUG_FUNCTION bool
4751 verify_eh_edges (gimple *stmt)
4752 {
4753   basic_block bb = gimple_bb (stmt);
4754   eh_landing_pad lp = NULL;
4755   int lp_nr;
4756   edge_iterator ei;
4757   edge e, eh_edge;
4758 
4759   lp_nr = lookup_stmt_eh_lp (stmt);
4760   if (lp_nr > 0)
4761     lp = get_eh_landing_pad_from_number (lp_nr);
4762 
4763   eh_edge = NULL;
4764   FOR_EACH_EDGE (e, ei, bb->succs)
4765     {
4766       if (e->flags & EDGE_EH)
4767 	{
4768 	  if (eh_edge)
4769 	    {
4770 	      error ("BB %i has multiple EH edges", bb->index);
4771 	      return true;
4772 	    }
4773 	  else
4774 	    eh_edge = e;
4775 	}
4776     }
4777 
4778   if (lp == NULL)
4779     {
4780       if (eh_edge)
4781 	{
4782 	  error ("BB %i cannot throw but has an EH edge", bb->index);
4783 	  return true;
4784 	}
4785       return false;
4786     }
4787 
4788   if (!stmt_could_throw_p (stmt))
4789     {
4790       error ("BB %i last statement has incorrectly set lp", bb->index);
4791       return true;
4792     }
4793 
4794   if (eh_edge == NULL)
4795     {
4796       error ("BB %i is missing an EH edge", bb->index);
4797       return true;
4798     }
4799 
4800   if (eh_edge->dest != label_to_block (lp->post_landing_pad))
4801     {
4802       error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
4803       return true;
4804     }
4805 
4806   return false;
4807 }
4808 
4809 /* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */
4810 
4811 DEBUG_FUNCTION bool
4812 verify_eh_dispatch_edge (geh_dispatch *stmt)
4813 {
4814   eh_region r;
4815   eh_catch c;
4816   basic_block src, dst;
4817   bool want_fallthru = true;
4818   edge_iterator ei;
4819   edge e, fall_edge;
4820 
4821   r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
4822   src = gimple_bb (stmt);
4823 
4824   FOR_EACH_EDGE (e, ei, src->succs)
4825     gcc_assert (e->aux == NULL);
4826 
4827   switch (r->type)
4828     {
4829     case ERT_TRY:
4830       for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
4831 	{
4832 	  dst = label_to_block (c->label);
4833 	  e = find_edge (src, dst);
4834 	  if (e == NULL)
4835 	    {
4836 	      error ("BB %i is missing an edge", src->index);
4837 	      return true;
4838 	    }
4839 	  e->aux = (void *)e;
4840 
4841 	  /* A catch-all handler doesn't have a fallthru.  */
4842 	  if (c->type_list == NULL)
4843 	    {
4844 	      want_fallthru = false;
4845 	      break;
4846 	    }
4847 	}
4848       break;
4849 
4850     case ERT_ALLOWED_EXCEPTIONS:
4851       dst = label_to_block (r->u.allowed.label);
4852       e = find_edge (src, dst);
4853       if (e == NULL)
4854 	{
4855 	  error ("BB %i is missing an edge", src->index);
4856 	  return true;
4857 	}
4858       e->aux = (void *)e;
4859       break;
4860 
4861     default:
4862       gcc_unreachable ();
4863     }
4864 
4865   fall_edge = NULL;
4866   FOR_EACH_EDGE (e, ei, src->succs)
4867     {
4868       if (e->flags & EDGE_FALLTHRU)
4869 	{
4870 	  if (fall_edge != NULL)
4871 	    {
4872 	      error ("BB %i has too many fallthru edges", src->index);
4873 	      return true;
4874 	    }
4875 	  fall_edge = e;
4876 	}
4877       else if (e->aux)
4878 	e->aux = NULL;
4879       else
4880 	{
4881 	  error ("BB %i has incorrect edge", src->index);
4882 	  return true;
4883 	}
4884     }
4885   if ((fall_edge != NULL) ^ want_fallthru)
4886     {
4887       error ("BB %i has incorrect fallthru edge", src->index);
4888       return true;
4889     }
4890 
4891   return false;
4892 }
4893