xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/dwarf2cfi.c (revision cef8759bd76c1b621f8eab8faa6f208faabc2e15)
1 /* Dwarf2 Call Frame Information helper routines.
2    Copyright (C) 1992-2017 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "target.h"
24 #include "function.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "tree-pass.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "emit-rtl.h"
31 #include "stor-layout.h"
32 #include "cfgbuild.h"
33 #include "dwarf2out.h"
34 #include "dwarf2asm.h"
35 #include "common/common-target.h"
36 
37 #include "except.h"		/* expand_builtin_dwarf_sp_column */
38 #include "expr.h"		/* init_return_column_size */
39 #include "output.h"		/* asm_out_file */
40 #include "debug.h"		/* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
41 
42 
43 /* ??? Poison these here until it can be done generically.  They've been
44    totally replaced in this file; make sure it stays that way.  */
45 #undef DWARF2_UNWIND_INFO
46 #undef DWARF2_FRAME_INFO
47 #if (GCC_VERSION >= 3000)
48  #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
49 #endif
50 
51 #ifndef INCOMING_RETURN_ADDR_RTX
52 #define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
53 #endif
54 
55 /* Maximum size (in bytes) of an artificially generated label.  */
56 #define MAX_ARTIFICIAL_LABEL_BYTES	30
57 
/* A collected description of an entire row of the abstract CFI table.  */
struct GTY(()) dw_cfi_row
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF column number; a null element means the column is not saved.  */
  cfi_vec reg_save;
};
70 
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
struct GTY(()) reg_saved_in_data {
  rtx orig_reg;		/* The register as the caller knows it.  */
  rtx saved_in_reg;	/* The register currently holding its value.  */
};
76 
77 
78 /* Since we no longer have a proper CFG, we're going to create a facsimile
79    of one on the fly while processing the frame-related insns.
80 
81    We create dw_trace_info structures for each extended basic block beginning
82    and ending at a "save point".  Save points are labels, barriers, certain
83    notes, and of course the beginning and end of the function.
84 
85    As we encounter control transfer insns, we propagate the "current"
86    row state across the edges to the starts of traces.  When checking is
87    enabled, we validate that we propagate the same data from all sources.
88 
89    All traces are members of the TRACE_INFO array, in the order in which
90    they appear in the instruction stream.
91 
92    All save points are present in the TRACE_INDEX hash, mapping the insn
93    starting a trace to the dw_trace_info describing the trace.  */
94 
struct dw_trace_info
{
  /* The insn that begins the trace.  */
  rtx_insn *head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx_insn *eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
};
149 
150 
151 /* Hashtable helpers.  */
152 
/* Hash traces by the insn that heads them; the table does not own the
   dw_trace_info objects (they live in the TRACE_INFO vector).  */
struct trace_info_hasher : nofree_ptr_hash <dw_trace_info>
{
  static inline hashval_t hash (const dw_trace_info *);
  static inline bool equal (const dw_trace_info *, const dw_trace_info *);
};
158 
/* A trace hashes as the UID of its head insn.  */
inline hashval_t
trace_info_hasher::hash (const dw_trace_info *ti)
{
  return INSN_UID (ti->head);
}
164 
/* Two traces are equal iff they begin at the same insn.  */
inline bool
trace_info_hasher::equal (const dw_trace_info *a, const dw_trace_info *b)
{
  return a->head == b->head;
}
170 
171 
/* The variables making up the pseudo-cfg, as described above.  */
static vec<dw_trace_info> trace_info;
static vec<dw_trace_info *> trace_work_list;
static hash_table<trace_info_hasher> *trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* The register save, if any, performed by the CIE for the return column.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter feeding the LCFI label generator in dwarf2out_cfi_label.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx_insn *add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

struct queued_reg_save {
  rtx reg;			/* Register being saved.  */
  rtx saved_reg;		/* Destination register, or NULL if saved
				   to memory at CFA_OFFSET.  */
  HOST_WIDE_INT cfa_offset;
};


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
222 static unsigned dw_frame_pointer_regnum;
223 
224 /* Hook used by __throw.  */
225 
226 rtx
227 expand_builtin_dwarf_sp_column (void)
228 {
229   unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
230   return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
231 }
232 
233 /* MEM is a memory reference for the register size table, each element of
234    which has mode MODE.  Initialize column C as a return address column.  */
235 
236 static void
237 init_return_column_size (machine_mode mode, rtx mem, unsigned int c)
238 {
239   HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
240   HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
241   emit_move_insn (adjust_address (mem, mode, offset),
242 		  gen_int_mode (size, mode));
243 }
244 
245 /* Datastructure used by expand_builtin_init_dwarf_reg_sizes and
246    init_one_dwarf_reg_size to communicate on what has been done by the
247    latter.  */
248 
/* Communication area between expand_builtin_init_dwarf_reg_sizes and
   init_one_dwarf_reg_size; records what the latter has already done.  */
struct init_one_dwarf_reg_state
{
  /* Whether the dwarf return column was initialized.  */
  bool wrote_return_column;

  /* For each hard register REGNO, whether init_one_dwarf_reg_size
     was given REGNO to process already.  */
  bool processed_regno [FIRST_PSEUDO_REGISTER];

};
259 
260 /* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
261    initialize the dwarf register size table entry corresponding to register
262    REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
263    use for the size entry to initialize, and INIT_STATE is the communication
264    datastructure conveying what we're doing to our caller.  */
265 
/* Helper for expand_builtin_init_dwarf_reg_sizes.  Generate code to
   initialize the dwarf register size table entry corresponding to register
   REGNO in REGMODE.  TABLE is the table base address, SLOTMODE is the mode to
   use for the size entry to initialize, and INIT_STATE is the communication
   datastructure conveying what we're doing to our caller.  */

static
void init_one_dwarf_reg_size (int regno, machine_mode regmode,
			      rtx table, machine_mode slotmode,
			      init_one_dwarf_reg_state *init_state)
{
  const unsigned int dnum = DWARF_FRAME_REGNUM (regno);
  const unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);
  const unsigned int dcol = DWARF_REG_TO_UNWIND_COLUMN (rnum);

  const HOST_WIDE_INT slotoffset = dcol * GET_MODE_SIZE (slotmode);
  const HOST_WIDE_INT regsize = GET_MODE_SIZE (regmode);

  /* Mark REGNO processed even if we bail out below, so the caller never
     hands it to us again (e.g. once as part of a span, once alone).  */
  init_state->processed_regno[regno] = true;

  /* Columns beyond the table's extent have no slot to initialize.  */
  if (rnum >= DWARF_FRAME_REGISTERS)
    return;

  if (dnum == DWARF_FRAME_RETURN_COLUMN)
    {
      /* A VOIDmode return column carries no size information.  */
      if (regmode == VOIDmode)
	return;
      init_state->wrote_return_column = true;
    }

  /* Negative offsets denote columns the unwinder does not track.  */
  if (slotoffset < 0)
    return;

  emit_move_insn (adjust_address (table, slotmode, slotoffset),
		  gen_int_mode (regsize, slotmode));
}
296 
297 /* Generate code to initialize the dwarf register size table located
298    at the provided ADDRESS.  */
299 
300 void
301 expand_builtin_init_dwarf_reg_sizes (tree address)
302 {
303   unsigned int i;
304   machine_mode mode = TYPE_MODE (char_type_node);
305   rtx addr = expand_normal (address);
306   rtx mem = gen_rtx_MEM (BLKmode, addr);
307 
308   init_one_dwarf_reg_state init_state;
309 
310   memset ((char *)&init_state, 0, sizeof (init_state));
311 
312   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
313     {
314       machine_mode save_mode;
315       rtx span;
316 
317       /* No point in processing a register multiple times.  This could happen
318 	 with register spans, e.g. when a reg is first processed as a piece of
319 	 a span, then as a register on its own later on.  */
320 
321       if (init_state.processed_regno[i])
322 	continue;
323 
324       save_mode = targetm.dwarf_frame_reg_mode (i);
325       span = targetm.dwarf_register_span (gen_rtx_REG (save_mode, i));
326 
327       if (!span)
328 	init_one_dwarf_reg_size (i, save_mode, mem, mode, &init_state);
329       else
330 	{
331 	  for (int si = 0; si < XVECLEN (span, 0); si++)
332 	    {
333 	      rtx reg = XVECEXP (span, 0, si);
334 
335 	      init_one_dwarf_reg_size
336 		(REGNO (reg), GET_MODE (reg), mem, mode, &init_state);
337 	    }
338 	}
339     }
340 
341   if (!init_state.wrote_return_column)
342     init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);
343 
344 #ifdef DWARF_ALT_FRAME_RETURN_COLUMN
345   init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
346 #endif
347 
348   targetm.init_dwarf_reg_sizes_extra (address);
349 }
350 
351 
352 static dw_trace_info *
353 get_trace_info (rtx_insn *insn)
354 {
355   dw_trace_info dummy;
356   dummy.head = insn;
357   return trace_index->find_with_hash (&dummy, INSN_UID (insn));
358 }
359 
360 static bool
361 save_point_p (rtx_insn *insn)
362 {
363   /* Labels, except those that are really jump tables.  */
364   if (LABEL_P (insn))
365     return inside_basic_block_p (insn);
366 
367   /* We split traces at the prologue/epilogue notes because those
368      are points at which the unwind info is usually stable.  This
369      makes it easier to find spots with identical unwind info so
370      that we can use remember/restore_state opcodes.  */
371   if (NOTE_P (insn))
372     switch (NOTE_KIND (insn))
373       {
374       case NOTE_INSN_PROLOGUE_END:
375       case NOTE_INSN_EPILOGUE_BEG:
376 	return true;
377       }
378 
379   return false;
380 }
381 
382 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */
383 
384 static inline HOST_WIDE_INT
385 div_data_align (HOST_WIDE_INT off)
386 {
387   HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
388   gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
389   return r;
390 }
391 
392 /* Return true if we need a signed version of a given opcode
393    (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended).  */
394 
395 static inline bool
396 need_data_align_sf_opcode (HOST_WIDE_INT off)
397 {
398   return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
399 }
400 
401 /* Return a pointer to a newly allocated Call Frame Instruction.  */
402 
403 static inline dw_cfi_ref
404 new_cfi (void)
405 {
406   dw_cfi_ref cfi = ggc_alloc<dw_cfi_node> ();
407 
408   cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
409   cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;
410 
411   return cfi;
412 }
413 
414 /* Return a newly allocated CFI row, with no defined data.  */
415 
416 static dw_cfi_row *
417 new_cfi_row (void)
418 {
419   dw_cfi_row *row = ggc_cleared_alloc<dw_cfi_row> ();
420 
421   row->cfa.reg = INVALID_REGNUM;
422 
423   return row;
424 }
425 
426 /* Return a copy of an existing CFI row.  */
427 
428 static dw_cfi_row *
429 copy_cfi_row (dw_cfi_row *src)
430 {
431   dw_cfi_row *dst = ggc_alloc<dw_cfi_row> ();
432 
433   *dst = *src;
434   dst->reg_save = vec_safe_copy (src->reg_save);
435 
436   return dst;
437 }
438 
439 /* Generate a new label for the CFI info to refer to.  */
440 
441 static char *
442 dwarf2out_cfi_label (void)
443 {
444   int num = dwarf2out_cfi_label_num++;
445   char label[20];
446 
447   ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
448 
449   return xstrdup (label);
450 }
451 
452 /* Add CFI either to the current insn stream or to a vector, or both.  */
453 
454 static void
455 add_cfi (dw_cfi_ref cfi)
456 {
457   any_cfis_emitted = true;
458 
459   if (add_cfi_insn != NULL)
460     {
461       add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
462       NOTE_CFI (add_cfi_insn) = cfi;
463     }
464 
465   if (add_cfi_vec != NULL)
466     vec_safe_push (*add_cfi_vec, cfi);
467 }
468 
469 static void
470 add_cfi_args_size (HOST_WIDE_INT size)
471 {
472   dw_cfi_ref cfi = new_cfi ();
473 
474   /* While we can occasionally have args_size < 0 internally, this state
475      should not persist at a point we actually need an opcode.  */
476   gcc_assert (size >= 0);
477 
478   cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
479   cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
480 
481   add_cfi (cfi);
482 }
483 
484 static void
485 add_cfi_restore (unsigned reg)
486 {
487   dw_cfi_ref cfi = new_cfi ();
488 
489   cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
490   cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
491 
492   add_cfi (cfi);
493 }
494 
495 /* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
496    that the register column is no longer saved.  */
497 
498 static void
499 update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
500 {
501   if (vec_safe_length (row->reg_save) <= column)
502     vec_safe_grow_cleared (row->reg_save, column + 1);
503   (*row->reg_save)[column] = cfi;
504 }
505 
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */
508 
509 static void
510 get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_node *loc)
511 {
512   struct dw_loc_descr_node *ptr;
513   cfa->offset = 0;
514   cfa->base_offset = 0;
515   cfa->indirect = 0;
516   cfa->reg = -1;
517 
518   for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
519     {
520       enum dwarf_location_atom op = ptr->dw_loc_opc;
521 
522       switch (op)
523 	{
524 	case DW_OP_reg0:
525 	case DW_OP_reg1:
526 	case DW_OP_reg2:
527 	case DW_OP_reg3:
528 	case DW_OP_reg4:
529 	case DW_OP_reg5:
530 	case DW_OP_reg6:
531 	case DW_OP_reg7:
532 	case DW_OP_reg8:
533 	case DW_OP_reg9:
534 	case DW_OP_reg10:
535 	case DW_OP_reg11:
536 	case DW_OP_reg12:
537 	case DW_OP_reg13:
538 	case DW_OP_reg14:
539 	case DW_OP_reg15:
540 	case DW_OP_reg16:
541 	case DW_OP_reg17:
542 	case DW_OP_reg18:
543 	case DW_OP_reg19:
544 	case DW_OP_reg20:
545 	case DW_OP_reg21:
546 	case DW_OP_reg22:
547 	case DW_OP_reg23:
548 	case DW_OP_reg24:
549 	case DW_OP_reg25:
550 	case DW_OP_reg26:
551 	case DW_OP_reg27:
552 	case DW_OP_reg28:
553 	case DW_OP_reg29:
554 	case DW_OP_reg30:
555 	case DW_OP_reg31:
556 	  cfa->reg = op - DW_OP_reg0;
557 	  break;
558 	case DW_OP_regx:
559 	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
560 	  break;
561 	case DW_OP_breg0:
562 	case DW_OP_breg1:
563 	case DW_OP_breg2:
564 	case DW_OP_breg3:
565 	case DW_OP_breg4:
566 	case DW_OP_breg5:
567 	case DW_OP_breg6:
568 	case DW_OP_breg7:
569 	case DW_OP_breg8:
570 	case DW_OP_breg9:
571 	case DW_OP_breg10:
572 	case DW_OP_breg11:
573 	case DW_OP_breg12:
574 	case DW_OP_breg13:
575 	case DW_OP_breg14:
576 	case DW_OP_breg15:
577 	case DW_OP_breg16:
578 	case DW_OP_breg17:
579 	case DW_OP_breg18:
580 	case DW_OP_breg19:
581 	case DW_OP_breg20:
582 	case DW_OP_breg21:
583 	case DW_OP_breg22:
584 	case DW_OP_breg23:
585 	case DW_OP_breg24:
586 	case DW_OP_breg25:
587 	case DW_OP_breg26:
588 	case DW_OP_breg27:
589 	case DW_OP_breg28:
590 	case DW_OP_breg29:
591 	case DW_OP_breg30:
592 	case DW_OP_breg31:
593 	  cfa->reg = op - DW_OP_breg0;
594 	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
595 	  break;
596 	case DW_OP_bregx:
597 	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
598 	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
599 	  break;
600 	case DW_OP_deref:
601 	  cfa->indirect = 1;
602 	  break;
603 	case DW_OP_plus_uconst:
604 	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
605 	  break;
606 	default:
607 	  gcc_unreachable ();
608 	}
609     }
610 }
611 
612 /* Find the previous value for the CFA, iteratively.  CFI is the opcode
613    to interpret, *LOC will be updated as necessary, *REMEMBER is used for
614    one level of remember/restore state processing.  */
615 
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  Opcodes that do not
   affect the CFA are ignored.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    /* Offset-only redefinition: register unchanged.  */
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    /* Register-only redefinition: offset unchanged.  */
    case DW_CFA_def_cfa_register:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    /* Full register + offset redefinition.  */
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    /* Arbitrary expression: decode it back into a dw_cfa_location.  */
    case DW_CFA_def_cfa_expression:
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    /* Only a single level of remember/restore is supported; the asserts
       catch nesting.  */
    case DW_CFA_remember_state:
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      break;
    }
}
652 
653 /* Determine if two dw_cfa_location structures define the same data.  */
654 
655 bool
656 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
657 {
658   return (loc1->reg == loc2->reg
659 	  && loc1->offset == loc2->offset
660 	  && loc1->indirect == loc2->indirect
661 	  && (loc1->indirect == 0
662 	      || loc1->base_offset == loc2->base_offset));
663 }
664 
665 /* Determine if two CFI operands are identical.  */
666 
/* Determine if two CFI operands are identical.  T tells which union
   member of A and B is active; comparisons are by value, with strings
   compared by content as well as identity.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      /* An unused operand always matches.  */
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Pointer equality short-circuits the string comparison.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
686 
687 /* Determine if two CFI entries are identical.  */
688 
689 static bool
690 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
691 {
692   enum dwarf_call_frame_info opc;
693 
694   /* Make things easier for our callers, including missing operands.  */
695   if (a == b)
696     return true;
697   if (a == NULL || b == NULL)
698     return false;
699 
700   /* Obviously, the opcodes must match.  */
701   opc = a->dw_cfi_opc;
702   if (opc != b->dw_cfi_opc)
703     return false;
704 
705   /* Compare the two operands, re-using the type of the operands as
706      already exposed elsewhere.  */
707   return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
708 			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
709 	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
710 				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
711 }
712 
713 /* Determine if two CFI_ROW structures are identical.  */
714 
715 static bool
716 cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
717 {
718   size_t i, n_a, n_b, n_max;
719 
720   if (a->cfa_cfi)
721     {
722       if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
723 	return false;
724     }
725   else if (!cfa_equal_p (&a->cfa, &b->cfa))
726     return false;
727 
728   n_a = vec_safe_length (a->reg_save);
729   n_b = vec_safe_length (b->reg_save);
730   n_max = MAX (n_a, n_b);
731 
732   for (i = 0; i < n_max; ++i)
733     {
734       dw_cfi_ref r_a = NULL, r_b = NULL;
735 
736       if (i < n_a)
737 	r_a = (*a->reg_save)[i];
738       if (i < n_b)
739 	r_b = (*b->reg_save)[i];
740 
741       if (!cfi_equal_p (r_a, r_b))
742         return false;
743     }
744 
745   return true;
746 }
747 
748 /* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
749    what opcode to emit.  Returns the CFI opcode to effect the change, or
750    NULL if NEW_CFA == OLD_CFA.  */
751 
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  The most compact applicable opcode is
   chosen: offset-only, register-only, register+offset, or a full
   expression as a last resort.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  Note this requires the old register to
	 be valid; coming from an undefined CFA falls through below.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_node *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
814 
815 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */
816 
/* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.
   Also keeps the trace's cfa_store offset in sync when the CFA is based
   on the store register.  */

static void
def_cfa_1 (dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If the CFA is computed from the register we spill through, track the
     new offset in cfa_store too.  */
  if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
    cur_trace->cfa_store.offset = new_cfa->offset;

  cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
  if (cfi)
    {
      cur_row->cfa = *new_cfa;
      /* Remember the full expression CFI, if any, so later rows can be
	 compared by expression rather than by decoded location.  */
      cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
			  ? cfi : NULL);

      add_cfi (cfi);
    }
}
835 
836 /* Add the CFI for saving a register.  REG is the CFA column number.
837    If SREG is -1, the register is saved at OFFSET from the CFA;
838    otherwise it is saved in SREG.  */
839 
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is INVALID_REGNUM, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  Also records the save in CUR_ROW.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved to memory: pick the most compact offset opcode.  The
	 short DW_CFA_offset form only encodes 6-bit register numbers.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
886 
887 /* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
888    and adjust data structures to match.  */
889 
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  When the CFA is based on the
   stack pointer, the CFA offset moves together with args_size.  */

static void
notice_args_size (rtx_insn *insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
      if (!STACK_GROWS_DOWNWARD)
	delta = -delta;

      cur_cfa->offset += delta;
    }
}
921 
922 /* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
923    data within the trace related to EH insns and args_size.  */
924 
/* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
   data within the trace related to EH insns and args_size.  The first EH
   insn only records the delayed args_size; subsequent ones emit a
   DW_CFA_GNU_args_size opcode when the size has changed.  */

static void
notice_eh_throw (rtx_insn *insn)
{
  HOST_WIDE_INT args_size;

  args_size = cur_trace->end_true_args_size;
  if (cur_trace->eh_head == NULL)
    {
      /* First EH insn in the trace: remember where the delayed
	 args_size must eventually be emitted.  */
      cur_trace->eh_head = insn;
      cur_trace->beg_delay_args_size = args_size;
      cur_trace->end_delay_args_size = args_size;
    }
  else if (cur_trace->end_delay_args_size != args_size)
    {
      cur_trace->end_delay_args_size = args_size;

      /* ??? If the CFA is the stack pointer, search backward for the last
	 CFI note and insert there.  Given that the stack changed for the
	 args_size change, there *must* be such a note in between here and
	 the last eh insn.  */
      add_cfi_args_size (args_size);
    }
}
948 
949 /* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
950 /* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
951    used in places where rtl is prohibited.  */
952 
953 static inline unsigned
954 dwf_regno (const_rtx reg)
955 {
956   gcc_assert (REGNO (reg) < FIRST_PSEUDO_REGISTER);
957   return DWARF_FRAME_REGNUM (REGNO (reg));
958 }
959 
960 /* Compare X and Y for equivalence.  The inputs may be REGs or PC_RTX.  */
961 
962 static bool
963 compare_reg_or_pc (rtx x, rtx y)
964 {
965   if (REG_P (x) && REG_P (y))
966     return REGNO (x) == REGNO (y);
967   return x == y;
968 }
969 
970 /* Record SRC as being saved in DEST.  DEST may be null to delete an
971    existing entry.  SRC may be a register or PC_RTX.  */
972 
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  Maintains the
   flat regs_saved_in_regs array of the current trace.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* Update or remove an existing entry for SRC, if there is one.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Nothing to add when deleting a non-existent entry.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
995 
996 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
997    SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */
998 
999 static void
1000 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
1001 {
1002   queued_reg_save *q;
1003   queued_reg_save e = {reg, sreg, offset};
1004   size_t i;
1005 
1006   /* Duplicates waste space, but it's also necessary to remove them
1007      for correctness, since the queue gets output in reverse order.  */
1008   FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1009     if (compare_reg_or_pc (q->reg, reg))
1010       {
1011 	*q = e;
1012 	return;
1013       }
1014 
1015   queued_reg_saves.safe_push (e);
1016 }
1017 
1018 /* Output all the entries in QUEUED_REG_SAVES.  */
1019 
1020 static void
1021 dwarf2out_flush_queued_reg_saves (void)
1022 {
1023   queued_reg_save *q;
1024   size_t i;
1025 
1026   FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1027     {
1028       unsigned int reg, sreg;
1029 
1030       record_reg_saved_in_reg (q->saved_reg, q->reg);
1031 
1032       if (q->reg == pc_rtx)
1033 	reg = DWARF_FRAME_RETURN_COLUMN;
1034       else
1035         reg = dwf_regno (q->reg);
1036       if (q->saved_reg)
1037 	sreg = dwf_regno (q->saved_reg);
1038       else
1039 	sreg = INVALID_REGNUM;
1040       reg_save (reg, sreg, q->cfa_offset);
1041     }
1042 
1043   queued_reg_saves.truncate (0);
1044 }
1045 
1046 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
1047    location for?  Or, does it clobber a register which we've previously
1048    said that some other register is saved in, and for which we now
1049    have a new location for?  */
1050 
1051 static bool
1052 clobbers_queued_reg_save (const_rtx insn)
1053 {
1054   queued_reg_save *q;
1055   size_t iq;
1056 
1057   FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
1058     {
1059       size_t ir;
1060       reg_saved_in_data *rir;
1061 
1062       if (modified_in_p (q->reg, insn))
1063 	return true;
1064 
1065       FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1066 	if (compare_reg_or_pc (q->reg, rir->orig_reg)
1067 	    && modified_in_p (rir->saved_in_reg, insn))
1068 	  return true;
1069     }
1070 
1071   return false;
1072 }
1073 
1074 /* What register, if any, is currently saved in REG?  */
1075 
1076 static rtx
1077 reg_saved_in (rtx reg)
1078 {
1079   unsigned int regn = REGNO (reg);
1080   queued_reg_save *q;
1081   reg_saved_in_data *rir;
1082   size_t i;
1083 
1084   FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1085     if (q->saved_reg && regn == REGNO (q->saved_reg))
1086       return q->reg;
1087 
1088   FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1089     if (regn == REGNO (rir->saved_in_reg))
1090       return rir->orig_reg;
1091 
1092   return NULL_RTX;
1093 }
1094 
1095 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */
1096 
1097 static void
1098 dwarf2out_frame_debug_def_cfa (rtx pat)
1099 {
1100   memset (cur_cfa, 0, sizeof (*cur_cfa));
1101 
1102   if (GET_CODE (pat) == PLUS)
1103     {
1104       cur_cfa->offset = INTVAL (XEXP (pat, 1));
1105       pat = XEXP (pat, 0);
1106     }
1107   if (MEM_P (pat))
1108     {
1109       cur_cfa->indirect = 1;
1110       pat = XEXP (pat, 0);
1111       if (GET_CODE (pat) == PLUS)
1112 	{
1113 	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1114 	  pat = XEXP (pat, 0);
1115 	}
1116     }
1117   /* ??? If this fails, we could be calling into the _loc functions to
1118      define a full expression.  So far no port does that.  */
1119   gcc_assert (REG_P (pat));
1120   cur_cfa->reg = dwf_regno (pat);
1121 }
1122 
1123 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */
1124 
1125 static void
1126 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1127 {
1128   rtx src, dest;
1129 
1130   gcc_assert (GET_CODE (pat) == SET);
1131   dest = XEXP (pat, 0);
1132   src = XEXP (pat, 1);
1133 
1134   switch (GET_CODE (src))
1135     {
1136     case PLUS:
1137       gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1138       cur_cfa->offset -= INTVAL (XEXP (src, 1));
1139       break;
1140 
1141     case REG:
1142       break;
1143 
1144     default:
1145       gcc_unreachable ();
1146     }
1147 
1148   cur_cfa->reg = dwf_regno (dest);
1149   gcc_assert (cur_cfa->indirect == 0);
1150 }
1151 
1152 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note.  */
1153 
1154 static void
1155 dwarf2out_frame_debug_cfa_offset (rtx set)
1156 {
1157   HOST_WIDE_INT offset;
1158   rtx src, addr, span;
1159   unsigned int sregno;
1160 
1161   src = XEXP (set, 1);
1162   addr = XEXP (set, 0);
1163   gcc_assert (MEM_P (addr));
1164   addr = XEXP (addr, 0);
1165 
1166   /* As documented, only consider extremely simple addresses.  */
1167   switch (GET_CODE (addr))
1168     {
1169     case REG:
1170       gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1171       offset = -cur_cfa->offset;
1172       break;
1173     case PLUS:
1174       gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1175       offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1176       break;
1177     default:
1178       gcc_unreachable ();
1179     }
1180 
1181   if (src == pc_rtx)
1182     {
1183       span = NULL;
1184       sregno = DWARF_FRAME_RETURN_COLUMN;
1185     }
1186   else
1187     {
1188       span = targetm.dwarf_register_span (src);
1189       sregno = dwf_regno (src);
1190     }
1191 
1192   /* ??? We'd like to use queue_reg_save, but we need to come up with
1193      a different flushing heuristic for epilogues.  */
1194   if (!span)
1195     reg_save (sregno, INVALID_REGNUM, offset);
1196   else
1197     {
1198       /* We have a PARALLEL describing where the contents of SRC live.
1199    	 Adjust the offset for each piece of the PARALLEL.  */
1200       HOST_WIDE_INT span_offset = offset;
1201 
1202       gcc_assert (GET_CODE (span) == PARALLEL);
1203 
1204       const int par_len = XVECLEN (span, 0);
1205       for (int par_index = 0; par_index < par_len; par_index++)
1206 	{
1207 	  rtx elem = XVECEXP (span, 0, par_index);
1208 	  sregno = dwf_regno (src);
1209 	  reg_save (sregno, INVALID_REGNUM, span_offset);
1210 	  span_offset += GET_MODE_SIZE (GET_MODE (elem));
1211 	}
1212     }
1213 }
1214 
1215 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note.  */
1216 
1217 static void
1218 dwarf2out_frame_debug_cfa_register (rtx set)
1219 {
1220   rtx src, dest;
1221   unsigned sregno, dregno;
1222 
1223   src = XEXP (set, 1);
1224   dest = XEXP (set, 0);
1225 
1226   record_reg_saved_in_reg (dest, src);
1227   if (src == pc_rtx)
1228     sregno = DWARF_FRAME_RETURN_COLUMN;
1229   else
1230     sregno = dwf_regno (src);
1231 
1232   dregno = dwf_regno (dest);
1233 
1234   /* ??? We'd like to use queue_reg_save, but we need to come up with
1235      a different flushing heuristic for epilogues.  */
1236   reg_save (sregno, dregno, 0);
1237 }
1238 
1239 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note.  */
1240 
1241 static void
1242 dwarf2out_frame_debug_cfa_expression (rtx set)
1243 {
1244   rtx src, dest, span;
1245   dw_cfi_ref cfi = new_cfi ();
1246   unsigned regno;
1247 
1248   dest = SET_DEST (set);
1249   src = SET_SRC (set);
1250 
1251   gcc_assert (REG_P (src));
1252   gcc_assert (MEM_P (dest));
1253 
1254   span = targetm.dwarf_register_span (src);
1255   gcc_assert (!span);
1256 
1257   regno = dwf_regno (src);
1258 
1259   cfi->dw_cfi_opc = DW_CFA_expression;
1260   cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1261   cfi->dw_cfi_oprnd2.dw_cfi_loc
1262     = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1263 			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1264 
1265   /* ??? We'd like to use queue_reg_save, were the interface different,
1266      and, as above, we could manage flushing for epilogues.  */
1267   add_cfi (cfi);
1268   update_row_reg_save (cur_row, regno, cfi);
1269 }
1270 
1271 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_VAL_EXPRESSION
1272    note.  */
1273 
1274 static void
1275 dwarf2out_frame_debug_cfa_val_expression (rtx set)
1276 {
1277   rtx dest = SET_DEST (set);
1278   gcc_assert (REG_P (dest));
1279 
1280   rtx span = targetm.dwarf_register_span (dest);
1281   gcc_assert (!span);
1282 
1283   rtx src = SET_SRC (set);
1284   dw_cfi_ref cfi = new_cfi ();
1285   cfi->dw_cfi_opc = DW_CFA_val_expression;
1286   cfi->dw_cfi_oprnd1.dw_cfi_reg_num = dwf_regno (dest);
1287   cfi->dw_cfi_oprnd2.dw_cfi_loc
1288     = mem_loc_descriptor (src, GET_MODE (src),
1289 			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1290   add_cfi (cfi);
1291   update_row_reg_save (cur_row, dwf_regno (dest), cfi);
1292 }
1293 
1294 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */
1295 
1296 static void
1297 dwarf2out_frame_debug_cfa_restore (rtx reg)
1298 {
1299   gcc_assert (REG_P (reg));
1300 
1301   rtx span = targetm.dwarf_register_span (reg);
1302   if (!span)
1303     {
1304       unsigned int regno = dwf_regno (reg);
1305       add_cfi_restore (regno);
1306       update_row_reg_save (cur_row, regno, NULL);
1307     }
1308   else
1309     {
1310       /* We have a PARALLEL describing where the contents of REG live.
1311 	 Restore the register for each piece of the PARALLEL.  */
1312       gcc_assert (GET_CODE (span) == PARALLEL);
1313 
1314       const int par_len = XVECLEN (span, 0);
1315       for (int par_index = 0; par_index < par_len; par_index++)
1316 	{
1317 	  reg = XVECEXP (span, 0, par_index);
1318 	  gcc_assert (REG_P (reg));
1319 	  unsigned int regno = dwf_regno (reg);
1320 	  add_cfi_restore (regno);
1321 	  update_row_reg_save (cur_row, regno, NULL);
1322 	}
1323     }
1324 }
1325 
1326 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1327    ??? Perhaps we should note in the CIE where windows are saved (instead of
1328    assuming 0(cfa)) and what registers are in the window.  */
1329 
1330 static void
1331 dwarf2out_frame_debug_cfa_window_save (void)
1332 {
1333   dw_cfi_ref cfi = new_cfi ();
1334 
1335   cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1336   add_cfi (cfi);
1337 }
1338 
1339 /* Record call frame debugging information for an expression EXPR,
1340    which either sets SP or FP (adjusting how we calculate the frame
1341    address) or saves a register to the stack or another register.
1342    LABEL indicates the address of EXPR.
1343 
1344    This function encodes a state machine mapping rtxes to actions on
1345    cfa, cfa_store, and cfa_temp.reg.  We describe these rules so
1346    users need not read the source code.
1347 
1348   The High-Level Picture
1349 
1350   Changes in the register we use to calculate the CFA: Currently we
1351   assume that if you copy the CFA register into another register, we
1352   should take the other one as the new CFA register; this seems to
1353   work pretty well.  If it's wrong for some target, it's simple
1354   enough not to set RTX_FRAME_RELATED_P on the insn in question.
1355 
1356   Changes in the register we use for saving registers to the stack:
1357   This is usually SP, but not always.  Again, we deduce that if you
1358   copy SP into another register (and SP is not the CFA register),
1359   then the new register is the one we will be using for register
1360   saves.  This also seems to work.
1361 
1362   Register saves: There's not much guesswork about this one; if
1363   RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1364   register save, and the register used to calculate the destination
1365   had better be the one we think we're using for this purpose.
1366   It's also assumed that a copy from a call-saved register to another
1367   register is saving that register if RTX_FRAME_RELATED_P is set on
1368   that instruction.  If the copy is from a call-saved register to
1369   the *same* register, that means that the register is now the same
1370   value as in the caller.
1371 
1372   Except: If the register being saved is the CFA register, and the
1373   offset is nonzero, we are saving the CFA, so we assume we have to
1374   use DW_CFA_def_cfa_expression.  If the offset is 0, we assume that
1375   the intent is to save the value of SP from the previous frame.
1376 
1377   In addition, if a register has previously been saved to a different
1378   register,
1379 
1380   Invariants / Summaries of Rules
1381 
1382   cfa	       current rule for calculating the CFA.  It usually
1383 	       consists of a register and an offset.  This is
1384 	       actually stored in *cur_cfa, but abbreviated
1385 	       for the purposes of this documentation.
1386   cfa_store    register used by prologue code to save things to the stack
1387 	       cfa_store.offset is the offset from the value of
1388 	       cfa_store.reg to the actual CFA
1389   cfa_temp     register holding an integral value.  cfa_temp.offset
1390 	       stores the value, which will be used to adjust the
1391 	       stack pointer.  cfa_temp is also used like cfa_store,
1392 	       to track stores to the stack via fp or a temp reg.
1393 
1394   Rules  1- 4: Setting a register's value to cfa.reg or an expression
1395 	       with cfa.reg as the first operand changes the cfa.reg and its
1396 	       cfa.offset.  Rule 1 and 4 also set cfa_temp.reg and
1397 	       cfa_temp.offset.
1398 
1399   Rules  6- 9: Set a non-cfa.reg register value to a constant or an
1400 	       expression yielding a constant.  This sets cfa_temp.reg
1401 	       and cfa_temp.offset.
1402 
1403   Rule 5:      Create a new register cfa_store used to save items to the
1404 	       stack.
1405 
1406   Rules 10-14: Save a register to the stack.  Define offset as the
1407 	       difference of the original location and cfa_store's
1408 	       location (or cfa_temp's location if cfa_temp is used).
1409 
1410   Rules 16-20: If AND operation happens on sp in prologue, we assume
1411 	       stack is realigned.  We will use a group of DW_OP_XXX
1412 	       expressions to represent the location of the stored
1413 	       register instead of CFA+offset.
1414 
1415   The Rules
1416 
1417   "{a,b}" indicates a choice of a xor b.
1418   "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1419 
1420   Rule 1:
1421   (set <reg1> <reg2>:cfa.reg)
1422   effects: cfa.reg = <reg1>
1423 	   cfa.offset unchanged
1424 	   cfa_temp.reg = <reg1>
1425 	   cfa_temp.offset = cfa.offset
1426 
1427   Rule 2:
1428   (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1429 			      {<const_int>,<reg>:cfa_temp.reg}))
1430   effects: cfa.reg = sp if fp used
1431 	   cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1432 	   cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1433 	     if cfa_store.reg==sp
1434 
1435   Rule 3:
1436   (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1437   effects: cfa.reg = fp
1438 	   cfa_offset += +/- <const_int>
1439 
1440   Rule 4:
1441   (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1442   constraints: <reg1> != fp
1443 	       <reg1> != sp
1444   effects: cfa.reg = <reg1>
1445 	   cfa_temp.reg = <reg1>
1446 	   cfa_temp.offset = cfa.offset
1447 
1448   Rule 5:
1449   (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1450   constraints: <reg1> != fp
1451 	       <reg1> != sp
1452   effects: cfa_store.reg = <reg1>
1453 	   cfa_store.offset = cfa.offset - cfa_temp.offset
1454 
1455   Rule 6:
1456   (set <reg> <const_int>)
1457   effects: cfa_temp.reg = <reg>
1458 	   cfa_temp.offset = <const_int>
1459 
1460   Rule 7:
1461   (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1462   effects: cfa_temp.reg = <reg1>
1463 	   cfa_temp.offset |= <const_int>
1464 
1465   Rule 8:
1466   (set <reg> (high <exp>))
1467   effects: none
1468 
1469   Rule 9:
1470   (set <reg> (lo_sum <exp> <const_int>))
1471   effects: cfa_temp.reg = <reg>
1472 	   cfa_temp.offset = <const_int>
1473 
1474   Rule 10:
1475   (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1476   effects: cfa_store.offset -= <const_int>
1477 	   cfa.offset = cfa_store.offset if cfa.reg == sp
1478 	   cfa.reg = sp
1479 	   cfa.base_offset = -cfa_store.offset
1480 
1481   Rule 11:
1482   (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1483   effects: cfa_store.offset += -/+ mode_size(mem)
1484 	   cfa.offset = cfa_store.offset if cfa.reg == sp
1485 	   cfa.reg = sp
1486 	   cfa.base_offset = -cfa_store.offset
1487 
1488   Rule 12:
1489   (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1490 
1491        <reg2>)
1492   effects: cfa.reg = <reg1>
1493 	   cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1494 
1495   Rule 13:
1496   (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1497   effects: cfa.reg = <reg1>
1498 	   cfa.base_offset = -{cfa_store,cfa_temp}.offset
1499 
1500   Rule 14:
1501   (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1502   effects: cfa.reg = <reg1>
1503 	   cfa.base_offset = -cfa_temp.offset
1504 	   cfa_temp.offset -= mode_size(mem)
1505 
1506   Rule 15:
1507   (set <reg> {unspec, unspec_volatile})
1508   effects: target-dependent
1509 
1510   Rule 16:
1511   (set sp (and: sp <const_int>))
1512   constraints: cfa_store.reg == sp
1513   effects: cfun->fde.stack_realign = 1
1514            cfa_store.offset = 0
1515 	   fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1516 
1517   Rule 17:
1518   (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1519   effects: cfa_store.offset += -/+ mode_size(mem)
1520 
1521   Rule 18:
1522   (set (mem ({pre_inc, pre_dec} sp)) fp)
1523   constraints: fde->stack_realign == 1
1524   effects: cfa_store.offset = 0
1525 	   cfa.reg != HARD_FRAME_POINTER_REGNUM
1526 
1527   Rule 19:
1528   (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1529   constraints: fde->stack_realign == 1
1530                && cfa.offset == 0
1531                && cfa.indirect == 0
1532                && cfa.reg != HARD_FRAME_POINTER_REGNUM
1533   effects: Use DW_CFA_def_cfa_expression to define cfa
1534   	   cfa.reg == fde->drap_reg  */
1535 
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility.   Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    /* First pass: MEM destinations only (register saves).  */
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  /* Second pass: the remaining SETs (register assignments);
	     a SEQUENCE is processed strictly in order.  */
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  /* If SRC currently holds the saved value of some other register
     (see reg_saved_in), track the original register instead.  */
  if (REG_P (src))
    {
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  fde = cfun->fde;

  /* Dispatch on the destination; the rule numbers below refer to the
     table in the comment preceding this function.  */
  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

              /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  /* The adjustment amount was previously loaded into a
		     register tracked as cfa_temp (rule 6).  */
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cur_trace->cfa_temp.reg = dwf_regno (dest);
		  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
          /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
          if (fde && XEXP (src, 0) == stack_pointer_rtx)
            {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

              gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
              fde->stack_realign = 1;
              fde->stack_realignment = INTVAL (XEXP (src, 1));
              cur_trace->cfa_store.offset = 0;

	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
            }
          return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  OFFSET is computed below as the save slot's
	 position relative to the CFA.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* POST_MODIFY stores at the pre-modification address, hence
	     the extra adjustment by the modification amount.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

          /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     regiser.  */
          if (fde
              && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  /* POST_DEC stores at the pre-decrement address, hence the
	     extra adjustment by the mode size kept in OFFSET.  */
	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
        break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cur_cfa->offset == 0)
	    {
              /* Rule 19 */
              /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
              if (fde
                  && fde->stack_realign
                  && cur_cfa->indirect == 0
                  && cur_cfa->reg != dw_frame_pointer_regnum)
                {
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
                }

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      /* An ordinary register save.  Ask the target whether SRC spans
	 multiple DWARF columns.  */
      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      else
	span = NULL;

      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  const int par_len = XVECLEN (span, 0);
	  for (int par_index = 0; par_index < par_len; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
2004 
2005 /* Record call frame debugging information for INSN, which either sets
2006    SP or FP (adjusting how we calculate the frame address) or saves a
2007    register to the stack.  */
2008 
2009 static void
2010 dwarf2out_frame_debug (rtx_insn *insn)
2011 {
2012   rtx note, n, pat;
2013   bool handled_one = false;
2014 
2015   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2016     switch (REG_NOTE_KIND (note))
2017       {
2018       case REG_FRAME_RELATED_EXPR:
2019 	pat = XEXP (note, 0);
2020 	goto do_frame_expr;
2021 
2022       case REG_CFA_DEF_CFA:
2023 	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
2024 	handled_one = true;
2025 	break;
2026 
2027       case REG_CFA_ADJUST_CFA:
2028 	n = XEXP (note, 0);
2029 	if (n == NULL)
2030 	  {
2031 	    n = PATTERN (insn);
2032 	    if (GET_CODE (n) == PARALLEL)
2033 	      n = XVECEXP (n, 0, 0);
2034 	  }
2035 	dwarf2out_frame_debug_adjust_cfa (n);
2036 	handled_one = true;
2037 	break;
2038 
2039       case REG_CFA_OFFSET:
2040 	n = XEXP (note, 0);
2041 	if (n == NULL)
2042 	  n = single_set (insn);
2043 	dwarf2out_frame_debug_cfa_offset (n);
2044 	handled_one = true;
2045 	break;
2046 
2047       case REG_CFA_REGISTER:
2048 	n = XEXP (note, 0);
2049 	if (n == NULL)
2050 	  {
2051 	    n = PATTERN (insn);
2052 	    if (GET_CODE (n) == PARALLEL)
2053 	      n = XVECEXP (n, 0, 0);
2054 	  }
2055 	dwarf2out_frame_debug_cfa_register (n);
2056 	handled_one = true;
2057 	break;
2058 
2059       case REG_CFA_EXPRESSION:
2060       case REG_CFA_VAL_EXPRESSION:
2061 	n = XEXP (note, 0);
2062 	if (n == NULL)
2063 	  n = single_set (insn);
2064 
2065 	if (REG_NOTE_KIND (note) == REG_CFA_EXPRESSION)
2066 	  dwarf2out_frame_debug_cfa_expression (n);
2067 	else
2068 	  dwarf2out_frame_debug_cfa_val_expression (n);
2069 
2070 	handled_one = true;
2071 	break;
2072 
2073       case REG_CFA_RESTORE:
2074 	n = XEXP (note, 0);
2075 	if (n == NULL)
2076 	  {
2077 	    n = PATTERN (insn);
2078 	    if (GET_CODE (n) == PARALLEL)
2079 	      n = XVECEXP (n, 0, 0);
2080 	    n = XEXP (n, 0);
2081 	  }
2082 	dwarf2out_frame_debug_cfa_restore (n);
2083 	handled_one = true;
2084 	break;
2085 
2086       case REG_CFA_SET_VDRAP:
2087 	n = XEXP (note, 0);
2088 	if (REG_P (n))
2089 	  {
2090 	    dw_fde_ref fde = cfun->fde;
2091 	    if (fde)
2092 	      {
2093 		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
2094 		if (REG_P (n))
2095 		  fde->vdrap_reg = dwf_regno (n);
2096 	      }
2097 	  }
2098 	handled_one = true;
2099 	break;
2100 
2101       case REG_CFA_TOGGLE_RA_MANGLE:
2102       case REG_CFA_WINDOW_SAVE:
2103 	/* We overload both of these operations onto the same DWARF opcode.  */
2104 	dwarf2out_frame_debug_cfa_window_save ();
2105 	handled_one = true;
2106 	break;
2107 
2108       case REG_CFA_FLUSH_QUEUE:
2109 	/* The actual flush happens elsewhere.  */
2110 	handled_one = true;
2111 	break;
2112 
2113       default:
2114 	break;
2115       }
2116 
2117   if (!handled_one)
2118     {
2119       pat = PATTERN (insn);
2120     do_frame_expr:
2121       dwarf2out_frame_debug_expr (pat);
2122 
2123       /* Check again.  A parallel can save and update the same register.
2124          We could probably check just once, here, but this is safer than
2125          removing the check at the start of the function.  */
2126       if (clobbers_queued_reg_save (pat))
2127 	dwarf2out_flush_queued_reg_saves ();
2128     }
2129 }
2130 
2131 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW.  */
2132 
2133 static void
2134 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2135 {
2136   size_t i, n_old, n_new, n_max;
2137   dw_cfi_ref cfi;
2138 
2139   if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2140     add_cfi (new_row->cfa_cfi);
2141   else
2142     {
2143       cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2144       if (cfi)
2145 	add_cfi (cfi);
2146     }
2147 
2148   n_old = vec_safe_length (old_row->reg_save);
2149   n_new = vec_safe_length (new_row->reg_save);
2150   n_max = MAX (n_old, n_new);
2151 
2152   for (i = 0; i < n_max; ++i)
2153     {
2154       dw_cfi_ref r_old = NULL, r_new = NULL;
2155 
2156       if (i < n_old)
2157 	r_old = (*old_row->reg_save)[i];
2158       if (i < n_new)
2159 	r_new = (*new_row->reg_save)[i];
2160 
2161       if (r_old == r_new)
2162 	;
2163       else if (r_new == NULL)
2164 	add_cfi_restore (i);
2165       else if (!cfi_equal_p (r_old, r_new))
2166         add_cfi (r_new);
2167     }
2168 }
2169 
2170 /* Examine CFI and return true if a cfi label and set_loc is needed
2171    beforehand.  Even when generating CFI assembler instructions, we
2172    still have to add the cfi to the list so that lookup_cfa_1 works
2173    later on.  When -g2 and above we even need to force emitting of
2174    CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2175    purposes.  If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2176    and so don't use convert_cfa_to_fb_loc_list.  */
2177 
2178 static bool
2179 cfi_label_required_p (dw_cfi_ref cfi)
2180 {
2181   if (!dwarf2out_do_cfi_asm ())
2182     return true;
2183 
2184   if (dwarf_version == 2
2185       && debug_info_level > DINFO_LEVEL_TERSE
2186       && (write_symbols == DWARF2_DEBUG
2187 	  || write_symbols == VMS_AND_DWARF2_DEBUG))
2188     {
2189       switch (cfi->dw_cfi_opc)
2190 	{
2191 	case DW_CFA_def_cfa_offset:
2192 	case DW_CFA_def_cfa_offset_sf:
2193 	case DW_CFA_def_cfa_register:
2194 	case DW_CFA_def_cfa:
2195 	case DW_CFA_def_cfa_sf:
2196 	case DW_CFA_def_cfa_expression:
2197 	case DW_CFA_restore_state:
2198 	  return true;
2199 	default:
2200 	  return false;
2201 	}
2202     }
2203   return false;
2204 }
2205 
2206 /* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
2207    function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
2208    necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx_insn *insn, *next;
  /* We always start with a function_begin label, so an advance_loc4
     from it is valid; FIRST is only set when a section switch makes
     an advance impossible and a set_loc is required instead.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Remember where the second-section CFIs begin within the FDE.  */
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Collect the whole run of CFI notes up to the next active insn
	     (or section switch), OR-ing together their label requirements;
	     NEXT is left just past the run.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      /* After a section switch we cannot compute a delta, so a
		 DW_CFA_set_loc is needed rather than an advance.  */
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      rtx_note *tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append every CFI in the collected run to the FDE.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2272 
2273 static void dump_cfi_row (FILE *f, dw_cfi_row *row);
2274 
2275 /* If LABEL is the start of a trace, then initialize the state of that
2276    trace from CUR_TRACE and CUR_ROW.  */
2277 
static void
maybe_record_trace_start (rtx_insn *start, rtx_insn *origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  /* START must be the head of a known trace (traces were collected
     by create_pseudo_cfg).  */
  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
#if CHECKING_P
      if (!cfi_row_equal_p (cur_row, ti->beg_row))
	{
	  if (dump_file)
	    {
	      fprintf (dump_file, "Inconsistent CFI state!\n");
	      fprintf (dump_file, "SHOULD have:\n");
	      dump_cfi_row (dump_file, ti->beg_row);
	      fprintf (dump_file, "DO have:\n");
	      dump_cfi_row (dump_file, cur_row);
	    }

	  gcc_unreachable ();
	}
#endif

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
2339 
2340 /* Similarly, but handle the args_size and CFA reset across EH
2341    and non-local goto edges.  */
2342 
2343 static void
2344 maybe_record_trace_start_abnormal (rtx_insn *start, rtx_insn *origin)
2345 {
2346   HOST_WIDE_INT save_args_size, delta;
2347   dw_cfa_location save_cfa;
2348 
2349   save_args_size = cur_trace->end_true_args_size;
2350   if (save_args_size == 0)
2351     {
2352       maybe_record_trace_start (start, origin);
2353       return;
2354     }
2355 
2356   delta = -save_args_size;
2357   cur_trace->end_true_args_size = 0;
2358 
2359   save_cfa = cur_row->cfa;
2360   if (cur_row->cfa.reg == dw_stack_pointer_regnum)
2361     {
2362       /* Convert a change in args_size (always a positive in the
2363 	 direction of stack growth) to a change in stack pointer.  */
2364       if (!STACK_GROWS_DOWNWARD)
2365 	delta = -delta;
2366 
2367       cur_row->cfa.offset += delta;
2368     }
2369 
2370   maybe_record_trace_start (start, origin);
2371 
2372   cur_trace->end_true_args_size = save_args_size;
2373   cur_row->cfa = save_cfa;
2374 }
2375 
2376 /* Propagate CUR_TRACE state to the destinations implied by INSN.  */
2377 /* ??? Sadly, this is in large part a duplicate of make_edges.  */
2378 
static void
create_trace_edges (rtx_insn *insn)
{
  rtx tmp;
  int i, n;

  if (JUMP_P (insn))
    {
      rtx_jump_table_data *table;

      /* Non-local gotos are handled from the CALL_P side.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &table))
	{
	  /* Every label in the jump table is a potential successor.  */
	  rtvec vec = table->get_labels ();

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab = as_a <rtx_insn *> (XEXP (RTVEC_ELT (vec, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump can reach any label whose address was taken.  */
	  rtx_insn *temp;
	  unsigned int i;
	  FOR_EACH_VEC_SAFE_ELT (forced_labels, i, temp)
	    maybe_record_trace_start (temp, insn);
	}
      else if (returnjump_p (insn))
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: each label operand is a possible target.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      rtx_insn *lab =
		as_a <rtx_insn *> (XEXP (ASM_OPERANDS_LABEL (tmp, i), 0));
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Ordinary (conditional or unconditional) jump to one label.  */
	  rtx_insn *lab = JUMP_LABEL_AS_INSN (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (rtx_insn_list *lab = nonlocal_goto_handler_labels;
	     lab;
	     lab = lab->next ())
	  maybe_record_trace_start_abnormal (lab->insn (), insn);
    }
  else if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
    {
      /* Delay-slot SEQUENCE: recurse on each member; EH edges for the
	 members are handled inside the recursion, so return here.  */
      int i, n = seq->len ();
      for (i = 0; i < n; ++i)
	create_trace_edges (seq->insn (i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2458 
2459 /* A subroutine of scan_trace.  Do what needs to be done "after" INSN.  */
2460 
static void
scan_insn_after (rtx_insn *insn)
{
  /* Record frame-related effects first, then any REG_ARGS_SIZE update;
     callers rely on this exact order.  */
  if (RTX_FRAME_RELATED_P (insn))
    dwarf2out_frame_debug (insn);
  notice_args_size (insn);
}
2468 
2469 /* Scan the trace beginning at INSN and create the CFI notes for the
2470    instructions therein.  */
2471 
static void
scan_trace (dw_trace_info *trace)
{
  rtx_insn *prev, *insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The trace's running (end) state starts as a copy of its incoming
     (begin) state.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  /* Track the CFA in a local; it is only committed to the row via
     def_cfa_1 below, which lets adjacent adjustments coalesce.  */
  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx_insn *control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (rtx_sequence *pat = dyn_cast <rtx_sequence *> (PATTERN (insn)))
	{
	  rtx_insn *elt;
	  int i, n = pat->len ();

	  control = pat->insn (0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = pat->insn (1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo ELT's effects on the current path: the edge
		     above saw them, the fallthrough must not.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = pat->insn (i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

          /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the per-trace globals for the next scan_trace call.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2648 
2649 /* Scan the function and create the initial set of CFI notes.  */
2650 
2651 static void
2652 create_cfi_notes (void)
2653 {
2654   dw_trace_info *ti;
2655 
2656   gcc_checking_assert (!queued_reg_saves.exists ());
2657   gcc_checking_assert (!trace_work_list.exists ());
2658 
2659   /* Always begin at the entry trace.  */
2660   ti = &trace_info[0];
2661   scan_trace (ti);
2662 
2663   while (!trace_work_list.is_empty ())
2664     {
2665       ti = trace_work_list.pop ();
2666       scan_trace (ti);
2667     }
2668 
2669   queued_reg_saves.release ();
2670   trace_work_list.release ();
2671 }
2672 
2673 /* Return the insn before the first NOTE_INSN_CFI after START.  */
2674 
2675 static rtx_insn *
2676 before_next_cfi_note (rtx_insn *start)
2677 {
2678   rtx_insn *prev = start;
2679   while (start)
2680     {
2681       if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2682 	return prev;
2683       prev = start;
2684       start = NEXT_INSN (start);
2685     }
2686   gcc_unreachable ();
2687 }
2688 
2689 /* Insert CFI notes between traces to properly change state between them.  */
2690 
static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      /* Fixup CFIs are emitted just before the head of trace TI.  */
      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* After the restore, the effective state is the previous
		 trace's BEGIN state rather than its end state.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx_insn *note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE (section switch) restarts args_size at zero.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2815 
2816 /* Set up the pseudo-cfg of instruction traces, as described at the
2817    block comment at the top of the file.  */
2818 
static void
create_pseudo_cfg (void)
{
  bool saw_barrier, switch_sections;
  dw_trace_info ti;
  rtx_insn *insn;
  unsigned i;

  /* The first trace begins at the start of the function,
     and begins with the CIE row state.  */
  trace_info.create (16);
  memset (&ti, 0, sizeof (ti));
  ti.head = get_insns ();
  ti.beg_row = cie_cfi_row;
  ti.cfa_store = cie_cfi_row->cfa;
  ti.cfa_temp.reg = INVALID_REGNUM;
  trace_info.quick_push (ti);

  /* NOTE(review): TI was already copied into trace_info above, so this
     safe_push mutates the local copy's regs_saved_in_regs vec, not the
     element stored in trace_info[0] — confirm whether the entry trace
     is meant to observe cie_return_save through this path.  */
  if (cie_return_save)
    ti.regs_saved_in_regs.safe_push (*cie_return_save);

  /* Walk all the insns, collecting start of trace locations.  */
  saw_barrier = false;
  switch_sections = false;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
	saw_barrier = true;
      else if (NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* We should have just seen a barrier.  */
	  gcc_assert (saw_barrier);
	  switch_sections = true;
	}
      /* Watch out for save_point notes between basic blocks.
	 In particular, a note after a barrier.  Do not record these,
	 delaying trace creation until the label.  */
      else if (save_point_p (insn)
	       && (LABEL_P (insn) || !saw_barrier))
	{
	  memset (&ti, 0, sizeof (ti));
	  ti.head = insn;
	  ti.switch_sections = switch_sections;
	  ti.id = trace_info.length ();
	  trace_info.safe_push (ti);

	  saw_barrier = false;
	  switch_sections = false;
	}
    }

  /* Create the trace index after we've finished building trace_info,
     avoiding stale pointer problems due to reallocation.  */
  trace_index
    = new hash_table<trace_info_hasher> (trace_info.length ());
  dw_trace_info *tp;
  FOR_EACH_VEC_ELT (trace_info, i, tp)
    {
      dw_trace_info **slot;

      if (dump_file)
	fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", tp->id,
		 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
		 tp->switch_sections ? " (section switch)" : "");

      slot = trace_index->find_slot_with_hash (tp, INSN_UID (tp->head), INSERT);
      gcc_assert (*slot == NULL);
      *slot = tp;
    }
}
2890 
2891 /* Record the initial position of the return address.  RTL is
2892    INCOMING_RETURN_ADDR_RTX.  */
2893 
static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  REG stays INVALID_REGNUM; only the
	 CFA-relative OFFSET is recorded below.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8. Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  /* When the RA column differs from where the RA actually lives,
     record the mapping; reg_save with INVALID_REGNUM means "saved
     at OFFSET from the CFA" rather than "saved in a register".  */
  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      if (reg != INVALID_REGNUM)
        record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2952 
/* Compute the CIE row state shared by all FDEs: the incoming CFA at SP
   and the initial location of the return address.  Populates the file-
   scope cie_cfi_vec / cie_cfi_row / cie_return_save.  */

static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);

  /* Use a scratch trace so the helpers called below (via cur_trace /
     cur_row / add_cfi_vec) record into the CIE state.  */
  memset (&cie_trace, 0, sizeof (cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset (&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc<reg_saved_in_data> ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  /* At most one such remapping is expected from
	     initial_return_save.  */
	  gcc_unreachable ();
	}
    }

  /* Detach the globals so later passes don't record into the CIE.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
3004 
/* Annotate the function with NOTE_INSN_CFI notes to record the CFI
   state at each location within the function.  These notes will be
   emitted during pass_final.  Returns 0 (no TODO flags).  */

static unsigned int
execute_dwarf2_frame (void)
{
  /* Different HARD_FRAME_POINTER_REGNUM might coexist in the same file.  */
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* The first time we're called, compute the incoming frame state.  */
  if (cie_cfi_vec == NULL)
    create_cie_data ();

  dwarf2out_alloc_current_fde ();

  /* Build the trace data structures (trace_info/trace_index) that the
     work below operates on.  */
  create_pseudo_cfg ();

  /* Do the work.  */
  create_cfi_notes ();
  connect_traces ();
  add_cfis_to_fde ();

  /* Free all the data we allocated.  */
  {
    size_t i;
    dw_trace_info *ti;

    FOR_EACH_VEC_ELT (trace_info, i, ti)
      ti->regs_saved_in_regs.release ();
  }
  trace_info.release ();

  delete trace_index;
  trace_index = NULL;

  return 0;
}
3043 
3044 /* Convert a DWARF call frame info. operation to its string name */
3045 
3046 static const char *
3047 dwarf_cfi_name (unsigned int cfi_opc)
3048 {
3049   const char *name = get_DW_CFA_name (cfi_opc);
3050 
3051   if (name != NULL)
3052     return name;
3053 
3054   return "DW_CFA_<unknown>";
3055 }
3056 
3057 /* This routine will generate the correct assembly data for a location
3058    description based on a cfi entry with a complex address.  */
3059 
3060 static void
3061 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
3062 {
3063   dw_loc_descr_ref loc;
3064   unsigned long size;
3065 
3066   if (cfi->dw_cfi_opc == DW_CFA_expression
3067       || cfi->dw_cfi_opc == DW_CFA_val_expression)
3068     {
3069       unsigned r =
3070 	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
3071       dw2_asm_output_data (1, r, NULL);
3072       loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3073     }
3074   else
3075     loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3076 
3077   /* Output the size of the block.  */
3078   size = size_of_locs (loc);
3079   dw2_asm_output_data_uleb128 (size, NULL);
3080 
3081   /* Now output the operations themselves.  */
3082   output_loc_sequence (loc, for_eh);
3083 }
3084 
3085 /* Similar, but used for .cfi_escape.  */
3086 
3087 static void
3088 output_cfa_loc_raw (dw_cfi_ref cfi)
3089 {
3090   dw_loc_descr_ref loc;
3091   unsigned long size;
3092 
3093   if (cfi->dw_cfi_opc == DW_CFA_expression
3094       || cfi->dw_cfi_opc == DW_CFA_val_expression)
3095     {
3096       unsigned r =
3097 	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
3098       fprintf (asm_out_file, "%#x,", r);
3099       loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
3100     }
3101   else
3102     loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
3103 
3104   /* Output the size of the block.  */
3105   size = size_of_locs (loc);
3106   dw2_asm_output_data_uleb128_raw (size);
3107   fputc (',', asm_out_file);
3108 
3109   /* Now output the operations themselves.  */
3110   output_loc_sequence_raw (loc);
3111 }
3112 
/* Output a Call Frame Information opcode CFI and its operand(s).
   FDE supplies (and receives updates to) the label that advance_loc
   deltas are measured against; FOR_EH selects the .eh_frame rather
   than the .debug_frame register numbering.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* The three "primary" opcodes pack their first operand into the low
     six bits of the opcode byte itself.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* Saved-register offsets are emitted in data-alignment units.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* Everything else is a full opcode byte followed by operands.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* The advance_locN forms emit a delta of the given byte width
	   from the FDE's current label, then advance that label.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  /* Note: the CFA offset is NOT factored by data alignment.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These four take a single register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands; the opcode byte is the whole instruction.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	case DW_CFA_val_expression:
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  break;
	}
    }
}
3254 
/* Similar, but emit CFI to F via assembler .cfi_* directives instead
   of raw bytes.  When F is not asm_out_file this is producing a
   debugging dump, and forms the assembler would normally synthesize
   are printed as readable pseudo-directives instead.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, " HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* No dedicated directive exists; escape the raw opcode for real
	 output, or print a made-up pseudo-directive for dumps.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size " HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size " HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
    case DW_CFA_expression:
    case DW_CFA_val_expression:
      /* Expressions likewise have no directive; dumps get an
	 abbreviated placeholder, real output uses .cfi_escape.  */
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_%scfa_%sexpression ...\n",
		   cfi->dw_cfi_opc == DW_CFA_def_cfa_expression ? "def_" : "",
		   cfi->dw_cfi_opc == DW_CFA_val_expression ? "val_" : "");
	  break;
	}
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3374 
3375 void
3376 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3377 {
3378   if (dwarf2out_do_cfi_asm ())
3379     output_cfi_directive (asm_out_file, cfi);
3380 }
3381 
3382 static void
3383 dump_cfi_row (FILE *f, dw_cfi_row *row)
3384 {
3385   dw_cfi_ref cfi;
3386   unsigned i;
3387 
3388   cfi = row->cfa_cfi;
3389   if (!cfi)
3390     {
3391       dw_cfa_location dummy;
3392       memset (&dummy, 0, sizeof (dummy));
3393       dummy.reg = INVALID_REGNUM;
3394       cfi = def_cfa_0 (&dummy, &row->cfa);
3395     }
3396   output_cfi_directive (f, cfi);
3397 
3398   FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3399     if (cfi)
3400       output_cfi_directive (f, cfi);
3401 }
3402 
/* Forward declaration so this can be called from a debugger without a
   header in scope.  */
void debug_cfi_row (dw_cfi_row *row);

/* Debugger entry point: dump ROW to stderr.  */
void
debug_cfi_row (dw_cfi_row *row)
{
  dump_cfi_row (stderr, row);
}
3410 
3411 
/* Save the result of dwarf2out_do_frame across PCH.
   This variable is tri-state, with 0 unset, >0 true, <0 false.
   It is filled in by dwarf2out_do_cfi_asm and consulted by
   dwarf2out_do_frame below.  */
static GTY(()) signed char saved_do_cfi_asm = 0;
3415 
3416 /* Decide whether we want to emit frame unwind information for the current
3417    translation unit.  */
3418 
3419 bool
3420 dwarf2out_do_frame (void)
3421 {
3422   /* We want to emit correct CFA location expressions or lists, so we
3423      have to return true if we're going to output debug info, even if
3424      we're not going to output frame or unwind info.  */
3425   if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3426     return true;
3427 
3428   if (saved_do_cfi_asm > 0)
3429     return true;
3430 
3431   if (targetm.debug_unwind_info () == UI_DWARF2)
3432     return true;
3433 
3434   if ((flag_unwind_tables || flag_exceptions)
3435       && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3436     return true;
3437 
3438   return false;
3439 }
3440 
/* Decide whether to emit frame unwind via assembler .cfi_* directives
   rather than encoding the frame sections by hand.  The answer is
   computed once and cached in saved_do_cfi_asm (which also survives
   PCH).  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* Return the cached answer if we have one.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  Only the
     absolute (0) and pc-relative application modes pass this check.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3479 
namespace {

/* Metadata for the "dwarf2" RTL pass; it is timed under final's timer
   (TV_FINAL) and neither requires nor provides any IL properties.  */
const pass_data pass_data_dwarf2_frame =
{
  RTL_PASS, /* type */
  "dwarf2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_FINAL, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

/* The RTL pass that annotates each function with NOTE_INSN_CFI notes;
   the actual work is done by execute_dwarf2_frame above.  */
class pass_dwarf2_frame : public rtl_opt_pass
{
public:
  pass_dwarf2_frame (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_dwarf2_frame, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *) { return execute_dwarf2_frame (); }

}; // class pass_dwarf2_frame

bool
pass_dwarf2_frame::gate (function *)
{
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  if (!targetm.have_prologue ())
    return false;

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}

} // anon namespace
3523 
/* Factory function used by the pass manager to instantiate the
   dwarf2 frame pass in context CTXT.  */
rtl_opt_pass *
make_pass_dwarf2_frame (gcc::context *ctxt)
{
  return new pass_dwarf2_frame (ctxt);
}
3529 
3530 #include "gt-dwarf2cfi.h"
3531