xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/dwarf2cfi.c (revision 678f798eafb4a421dfa25a85d03455fafa0c7f50)
1 /* Dwarf2 Call Frame Information helper routines.
2    Copyright (C) 1992-2013 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "version.h"
25 #include "flags.h"
26 #include "rtl.h"
27 #include "function.h"
28 #include "basic-block.h"
29 #include "dwarf2.h"
30 #include "dwarf2out.h"
31 #include "dwarf2asm.h"
32 #include "ggc.h"
33 #include "tm_p.h"
34 #include "target.h"
35 #include "common/common-target.h"
36 #include "tree-pass.h"
37 
38 #include "except.h"		/* expand_builtin_dwarf_sp_column */
39 #include "expr.h"		/* init_return_column_size */
40 #include "regs.h"		/* expand_builtin_init_dwarf_reg_sizes */
41 #include "output.h"		/* asm_out_file */
42 #include "debug.h"		/* dwarf2out_do_frame, dwarf2out_do_cfi_asm */
43 
44 
45 /* ??? Poison these here until it can be done generically.  They've been
46    totally replaced in this file; make sure it stays that way.  */
47 #undef DWARF2_UNWIND_INFO
48 #undef DWARF2_FRAME_INFO
49 #if (GCC_VERSION >= 3000)
50  #pragma GCC poison DWARF2_UNWIND_INFO DWARF2_FRAME_INFO
51 #endif
52 
53 #ifndef INCOMING_RETURN_ADDR_RTX
54 #define INCOMING_RETURN_ADDR_RTX  (gcc_unreachable (), NULL_RTX)
55 #endif
56 
57 /* Maximum size (in bytes) of an artificially generated label.  */
58 #define MAX_ARTIFICIAL_LABEL_BYTES	30
59 
/* A collected description of an entire row of the abstract CFI table.  */
typedef struct GTY(()) dw_cfi_row_struct
{
  /* The expression that computes the CFA, expressed in two different ways.
     The CFA member for the simple cases, and the full CFI expression for
     the complex cases.  The latter will be a DW_CFA_cfa_expression.  */
  dw_cfa_location cfa;
  dw_cfi_ref cfa_cfi;

  /* The expressions for any register column that is saved.  Indexed by
     DWARF column number; a NULL element means "not saved".  */
  cfi_vec reg_save;
} dw_cfi_row;
72 
/* The caller's ORIG_REG is saved in SAVED_IN_REG.  */
typedef struct GTY(()) reg_saved_in_data_struct {
  /* The register (or PC_RTX) whose entry value is being tracked.  */
  rtx orig_reg;
  /* The register currently holding that value.  */
  rtx saved_in_reg;
} reg_saved_in_data;
78 
79 
80 /* Since we no longer have a proper CFG, we're going to create a facsimile
81    of one on the fly while processing the frame-related insns.
82 
83    We create dw_trace_info structures for each extended basic block beginning
84    and ending at a "save point".  Save points are labels, barriers, certain
85    notes, and of course the beginning and end of the function.
86 
87    As we encounter control transfer insns, we propagate the "current"
88    row state across the edges to the starts of traces.  When checking is
89    enabled, we validate that we propagate the same data from all sources.
90 
91    All traces are members of the TRACE_INFO array, in the order in which
92    they appear in the instruction stream.
93 
94    All save points are present in the TRACE_INDEX hash, mapping the insn
95    starting a trace to the dw_trace_info describing the trace.  */
96 
typedef struct
{
  /* The insn that begins the trace.  */
  rtx head;

  /* The row state at the beginning and end of the trace.  */
  dw_cfi_row *beg_row, *end_row;

  /* Tracking for DW_CFA_GNU_args_size.  The "true" sizes are those we find
     while scanning insns.  However, the args_size value is irrelevant at
     any point except can_throw_internal_p insns.  Therefore the "delay"
     sizes are the values that must actually be emitted for this trace.  */
  HOST_WIDE_INT beg_true_args_size, end_true_args_size;
  HOST_WIDE_INT beg_delay_args_size, end_delay_args_size;

  /* The first EH insn in the trace, where beg_delay_args_size must be set.  */
  rtx eh_head;

  /* The following variables contain data used in interpreting frame related
     expressions.  These are not part of the "real" row state as defined by
     Dwarf, but it seems like they need to be propagated into a trace in case
     frame related expressions have been sunk.  */
  /* ??? This seems fragile.  These variables are fragments of a larger
     expression.  If we do not keep the entire expression together, we risk
     not being able to put it together properly.  Consider forcing targets
     to generate self-contained expressions and dropping all of the magic
     interpretation code in this file.  Or at least refusing to shrink wrap
     any frame related insn that doesn't contain a complete expression.  */

  /* The register used for saving registers to the stack, and its offset
     from the CFA.  */
  dw_cfa_location cfa_store;

  /* A temporary register holding an integral value used in adjusting SP
     or setting up the store_reg.  The "offset" field holds the integer
     value, not an offset.  */
  dw_cfa_location cfa_temp;

  /* A set of registers saved in other registers.  This is the inverse of
     the row->reg_save info, if the entry is a DW_CFA_register.  This is
     implemented as a flat array because it normally contains zero or 1
     entry, depending on the target.  IA-64 is the big spender here, using
     a maximum of 5 entries.  */
  vec<reg_saved_in_data> regs_saved_in_regs;

  /* An identifier for this trace.  Used only for debugging dumps.  */
  unsigned id;

  /* True if this trace immediately follows NOTE_INSN_SWITCH_TEXT_SECTIONS.  */
  bool switch_sections;

  /* True if we've seen different values incoming to beg_true_args_size.  */
  bool args_size_undefined;
} dw_trace_info;
151 
152 
typedef dw_trace_info *dw_trace_info_ref;


/* The variables making up the pseudo-cfg, as described above.  */

/* All traces, in the order in which they appear in the insn stream.  */
static vec<dw_trace_info> trace_info;
/* Traces whose incoming row state still needs to be processed.  */
static vec<dw_trace_info_ref> trace_work_list;
/* Maps the insn starting a trace to the dw_trace_info describing it.  */
static htab_t trace_index;

/* A vector of call frame insns for the CIE.  */
cfi_vec cie_cfi_vec;

/* The state of the first row of the FDE table, which includes the
   state provided by the CIE.  */
static GTY(()) dw_cfi_row *cie_cfi_row;

/* NOTE(review): presumably records how the CIE saves the return-address
   column in a register, if it does; set outside this excerpt — confirm.  */
static GTY(()) reg_saved_in_data *cie_return_save;

/* Counter used by dwarf2out_cfi_label to generate unique LCFI labels.  */
static GTY(()) unsigned long dwarf2out_cfi_label_num;

/* The insn after which a new CFI note should be emitted.  */
static rtx add_cfi_insn;

/* When non-null, add_cfi will add the CFI to this vector.  */
static cfi_vec *add_cfi_vec;

/* The current instruction trace.  */
static dw_trace_info *cur_trace;

/* The current, i.e. most recently generated, row of the CFI table.  */
static dw_cfi_row *cur_row;

/* A copy of the current CFA, for use during the processing of a
   single insn.  */
static dw_cfa_location *cur_cfa;

/* We delay emitting a register save until either (a) we reach the end
   of the prologue or (b) the register is clobbered.  This clusters
   register saves so that there are fewer pc advances.  */

typedef struct {
  /* Register (or PC_RTX) being saved.  */
  rtx reg;
  /* Destination register, or NULL when saved at CFA_OFFSET instead.  */
  rtx saved_reg;
  /* Offset from the CFA, used when SAVED_REG is NULL.  */
  HOST_WIDE_INT cfa_offset;
} queued_reg_save;


static vec<queued_reg_save> queued_reg_saves;

/* True if any CFI directives were emitted at the current insn.  */
static bool any_cfis_emitted;

/* Short-hand for commonly used register numbers.  */
static unsigned dw_stack_pointer_regnum;
static unsigned dw_frame_pointer_regnum;
207 
/* Hook used by __throw.  */

rtx
expand_builtin_dwarf_sp_column (void)
{
  /* Map the hard stack pointer to its DWARF column, then apply any
     target remapping used for output (second argument 1 = for_eh).  */
  unsigned int dwarf_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  return GEN_INT (DWARF2_FRAME_REG_OUT (dwarf_regnum, 1));
}
216 
217 /* MEM is a memory reference for the register size table, each element of
218    which has mode MODE.  Initialize column C as a return address column.  */
219 
220 static void
221 init_return_column_size (enum machine_mode mode, rtx mem, unsigned int c)
222 {
223   HOST_WIDE_INT offset = c * GET_MODE_SIZE (mode);
224   HOST_WIDE_INT size = GET_MODE_SIZE (Pmode);
225   emit_move_insn (adjust_address (mem, mode, offset), GEN_INT (size));
226 }
227 
/* Generate code to initialize the register size table.  */

void
expand_builtin_init_dwarf_reg_sizes (tree address)
{
  unsigned int i;
  enum machine_mode mode = TYPE_MODE (char_type_node);
  rtx addr = expand_normal (address);
  rtx mem = gen_rtx_MEM (BLKmode, addr);
  bool wrote_return_column = false;

  /* Walk every hard register, recording the size of the mode in which
     each is saved into its slot of the table.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      unsigned int dnum = DWARF_FRAME_REGNUM (i);
      unsigned int rnum = DWARF2_FRAME_REG_OUT (dnum, 1);

      if (rnum < DWARF_FRAME_REGISTERS)
	{
	  HOST_WIDE_INT offset = rnum * GET_MODE_SIZE (mode);
	  enum machine_mode save_mode = reg_raw_mode[i];
	  HOST_WIDE_INT size;

	  /* Partially call-clobbered registers are saved in the mode
	     that covers only the preserved part.  */
	  if (HARD_REGNO_CALL_PART_CLOBBERED (i, save_mode))
	    save_mode = choose_hard_reg_mode (i, 1, true);
	  if (dnum == DWARF_FRAME_RETURN_COLUMN)
	    {
	      if (save_mode == VOIDmode)
		continue;
	      wrote_return_column = true;
	    }
	  size = GET_MODE_SIZE (save_mode);
	  if (offset < 0)
	    continue;

	  emit_move_insn (adjust_address (mem, mode, offset),
			  gen_int_mode (size, mode));
	}
    }

  /* Make sure the return column is always initialized, even when no
     hard register maps to it.  */
  if (!wrote_return_column)
    init_return_column_size (mode, mem, DWARF_FRAME_RETURN_COLUMN);

#ifdef DWARF_ALT_FRAME_RETURN_COLUMN
  init_return_column_size (mode, mem, DWARF_ALT_FRAME_RETURN_COLUMN);
#endif

  /* Give the target a chance to record additional columns.  */
  targetm.init_dwarf_reg_sizes_extra (address);
}
276 
277 
278 static hashval_t
279 dw_trace_info_hash (const void *ptr)
280 {
281   const dw_trace_info *ti = (const dw_trace_info *) ptr;
282   return INSN_UID (ti->head);
283 }
284 
285 static int
286 dw_trace_info_eq (const void *ptr_a, const void *ptr_b)
287 {
288   const dw_trace_info *a = (const dw_trace_info *) ptr_a;
289   const dw_trace_info *b = (const dw_trace_info *) ptr_b;
290   return a->head == b->head;
291 }
292 
293 static dw_trace_info *
294 get_trace_info (rtx insn)
295 {
296   dw_trace_info dummy;
297   dummy.head = insn;
298   return (dw_trace_info *)
299     htab_find_with_hash (trace_index, &dummy, INSN_UID (insn));
300 }
301 
302 static bool
303 save_point_p (rtx insn)
304 {
305   /* Labels, except those that are really jump tables.  */
306   if (LABEL_P (insn))
307     return inside_basic_block_p (insn);
308 
309   /* We split traces at the prologue/epilogue notes because those
310      are points at which the unwind info is usually stable.  This
311      makes it easier to find spots with identical unwind info so
312      that we can use remember/restore_state opcodes.  */
313   if (NOTE_P (insn))
314     switch (NOTE_KIND (insn))
315       {
316       case NOTE_INSN_PROLOGUE_END:
317       case NOTE_INSN_EPILOGUE_BEG:
318 	return true;
319       }
320 
321   return false;
322 }
323 
324 /* Divide OFF by DWARF_CIE_DATA_ALIGNMENT, asserting no remainder.  */
325 
326 static inline HOST_WIDE_INT
327 div_data_align (HOST_WIDE_INT off)
328 {
329   HOST_WIDE_INT r = off / DWARF_CIE_DATA_ALIGNMENT;
330   gcc_assert (r * DWARF_CIE_DATA_ALIGNMENT == off);
331   return r;
332 }
333 
334 /* Return true if we need a signed version of a given opcode
335    (e.g. DW_CFA_offset_extended_sf vs DW_CFA_offset_extended).  */
336 
337 static inline bool
338 need_data_align_sf_opcode (HOST_WIDE_INT off)
339 {
340   return DWARF_CIE_DATA_ALIGNMENT < 0 ? off > 0 : off < 0;
341 }
342 
/* Return a pointer to a newly allocated Call Frame Instruction.  */

static inline dw_cfi_ref
new_cfi (void)
{
  dw_cfi_ref cfi = ggc_alloc_dw_cfi_node ();

  /* Zero both operands; the caller fills in the opcode and whichever
     operands that opcode uses.  */
  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = 0;
  cfi->dw_cfi_oprnd2.dw_cfi_reg_num = 0;

  return cfi;
}
355 
/* Return a newly allocated CFI row, with no defined data.  */

static dw_cfi_row *
new_cfi_row (void)
{
  dw_cfi_row *row = ggc_alloc_cleared_dw_cfi_row ();

  /* The cleared allocation leaves no registers saved; mark the CFA
     register invalid so the row is recognizably undefined.  */
  row->cfa.reg = INVALID_REGNUM;

  return row;
}
367 
368 /* Return a copy of an existing CFI row.  */
369 
370 static dw_cfi_row *
371 copy_cfi_row (dw_cfi_row *src)
372 {
373   dw_cfi_row *dst = ggc_alloc_dw_cfi_row ();
374 
375   *dst = *src;
376   dst->reg_save = vec_safe_copy (src->reg_save);
377 
378   return dst;
379 }
380 
381 /* Generate a new label for the CFI info to refer to.  */
382 
383 static char *
384 dwarf2out_cfi_label (void)
385 {
386   int num = dwarf2out_cfi_label_num++;
387   char label[20];
388 
389   ASM_GENERATE_INTERNAL_LABEL (label, "LCFI", num);
390 
391   return xstrdup (label);
392 }
393 
/* Add CFI either to the current insn stream or to a vector, or both.  */

static void
add_cfi (dw_cfi_ref cfi)
{
  any_cfis_emitted = true;

  /* Emit the CFI as a note after ADD_CFI_INSN, and advance the
     insertion point so subsequent CFIs chain after this one.  */
  if (add_cfi_insn != NULL)
    {
      add_cfi_insn = emit_note_after (NOTE_INSN_CFI, add_cfi_insn);
      NOTE_CFI (add_cfi_insn) = cfi;
    }

  if (add_cfi_vec != NULL)
    vec_safe_push (*add_cfi_vec, cfi);
}
410 
411 static void
412 add_cfi_args_size (HOST_WIDE_INT size)
413 {
414   dw_cfi_ref cfi = new_cfi ();
415 
416   /* While we can occasionally have args_size < 0 internally, this state
417      should not persist at a point we actually need an opcode.  */
418   gcc_assert (size >= 0);
419 
420   cfi->dw_cfi_opc = DW_CFA_GNU_args_size;
421   cfi->dw_cfi_oprnd1.dw_cfi_offset = size;
422 
423   add_cfi (cfi);
424 }
425 
426 static void
427 add_cfi_restore (unsigned reg)
428 {
429   dw_cfi_ref cfi = new_cfi ();
430 
431   cfi->dw_cfi_opc = (reg & ~0x3f ? DW_CFA_restore_extended : DW_CFA_restore);
432   cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
433 
434   add_cfi (cfi);
435 }
436 
/* Perform ROW->REG_SAVE[COLUMN] = CFI.  CFI may be null, indicating
   that the register column is no longer saved.  */

static void
update_row_reg_save (dw_cfi_row *row, unsigned column, dw_cfi_ref cfi)
{
  /* Grow the vector on demand; new slots are zeroed (i.e. "not saved").  */
  if (vec_safe_length (row->reg_save) <= column)
    vec_safe_grow_cleared (row->reg_save, column + 1);
  (*row->reg_save)[column] = cfi;
}
447 
/* This function fills in a dw_cfa_location structure from a dwarf location
   descriptor sequence.  */
450 
static void
get_cfa_from_loc_descr (dw_cfa_location *cfa, struct dw_loc_descr_struct *loc)
{
  struct dw_loc_descr_struct *ptr;

  /* Start from an empty, direct location with no register selected.
     NOTE(review): -1 here presumably converts to INVALID_REGNUM in the
     unsigned reg field — confirm against dw_cfa_location's declaration.  */
  cfa->offset = 0;
  cfa->base_offset = 0;
  cfa->indirect = 0;
  cfa->reg = -1;

  /* Interpret the (restricted) expression one operation at a time;
     anything outside this small vocabulary is a bug in the producer.  */
  for (ptr = loc; ptr != NULL; ptr = ptr->dw_loc_next)
    {
      enum dwarf_location_atom op = ptr->dw_loc_opc;

      switch (op)
	{
	/* DW_OP_reg<n>: the location is register N itself.  */
	case DW_OP_reg0:
	case DW_OP_reg1:
	case DW_OP_reg2:
	case DW_OP_reg3:
	case DW_OP_reg4:
	case DW_OP_reg5:
	case DW_OP_reg6:
	case DW_OP_reg7:
	case DW_OP_reg8:
	case DW_OP_reg9:
	case DW_OP_reg10:
	case DW_OP_reg11:
	case DW_OP_reg12:
	case DW_OP_reg13:
	case DW_OP_reg14:
	case DW_OP_reg15:
	case DW_OP_reg16:
	case DW_OP_reg17:
	case DW_OP_reg18:
	case DW_OP_reg19:
	case DW_OP_reg20:
	case DW_OP_reg21:
	case DW_OP_reg22:
	case DW_OP_reg23:
	case DW_OP_reg24:
	case DW_OP_reg25:
	case DW_OP_reg26:
	case DW_OP_reg27:
	case DW_OP_reg28:
	case DW_OP_reg29:
	case DW_OP_reg30:
	case DW_OP_reg31:
	  cfa->reg = op - DW_OP_reg0;
	  break;
	/* DW_OP_regx: same, for register numbers above 31.  */
	case DW_OP_regx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	/* DW_OP_breg<n>: register N plus a signed base offset.  */
	case DW_OP_breg0:
	case DW_OP_breg1:
	case DW_OP_breg2:
	case DW_OP_breg3:
	case DW_OP_breg4:
	case DW_OP_breg5:
	case DW_OP_breg6:
	case DW_OP_breg7:
	case DW_OP_breg8:
	case DW_OP_breg9:
	case DW_OP_breg10:
	case DW_OP_breg11:
	case DW_OP_breg12:
	case DW_OP_breg13:
	case DW_OP_breg14:
	case DW_OP_breg15:
	case DW_OP_breg16:
	case DW_OP_breg17:
	case DW_OP_breg18:
	case DW_OP_breg19:
	case DW_OP_breg20:
	case DW_OP_breg21:
	case DW_OP_breg22:
	case DW_OP_breg23:
	case DW_OP_breg24:
	case DW_OP_breg25:
	case DW_OP_breg26:
	case DW_OP_breg27:
	case DW_OP_breg28:
	case DW_OP_breg29:
	case DW_OP_breg30:
	case DW_OP_breg31:
	  cfa->reg = op - DW_OP_breg0;
	  cfa->base_offset = ptr->dw_loc_oprnd1.v.val_int;
	  break;
	/* DW_OP_bregx: register + offset, for registers above 31.  */
	case DW_OP_bregx:
	  cfa->reg = ptr->dw_loc_oprnd1.v.val_int;
	  cfa->base_offset = ptr->dw_loc_oprnd2.v.val_int;
	  break;
	/* DW_OP_deref: the CFA is loaded through the computed address.  */
	case DW_OP_deref:
	  cfa->indirect = 1;
	  break;
	/* DW_OP_plus_uconst: final unsigned displacement of the CFA.  */
	case DW_OP_plus_uconst:
	  cfa->offset = ptr->dw_loc_oprnd1.v.val_unsigned;
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}
553 
/* Find the previous value for the CFA, iteratively.  CFI is the opcode
   to interpret, *LOC will be updated as necessary, *REMEMBER is used for
   one level of remember/restore state processing.  */

void
lookup_cfa_1 (dw_cfi_ref cfi, dw_cfa_location *loc, dw_cfa_location *remember)
{
  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      /* Only the offset changes; the register is untouched.  */
      loc->offset = cfi->dw_cfi_oprnd1.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_register:
      /* Only the register changes; the offset is untouched.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      break;
    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      /* Both register and offset are redefined.  */
      loc->reg = cfi->dw_cfi_oprnd1.dw_cfi_reg_num;
      loc->offset = cfi->dw_cfi_oprnd2.dw_cfi_offset;
      break;
    case DW_CFA_def_cfa_expression:
      /* Recover a register/offset form from the location expression.  */
      get_cfa_from_loc_descr (loc, cfi->dw_cfi_oprnd1.dw_cfi_loc);
      break;

    case DW_CFA_remember_state:
      /* Only a single level of remembered state is supported here.  */
      gcc_assert (!remember->in_use);
      *remember = *loc;
      remember->in_use = 1;
      break;
    case DW_CFA_restore_state:
      gcc_assert (remember->in_use);
      *loc = *remember;
      remember->in_use = 0;
      break;

    default:
      /* Other opcodes do not affect the CFA.  */
      break;
    }
}
594 
595 /* Determine if two dw_cfa_location structures define the same data.  */
596 
597 bool
598 cfa_equal_p (const dw_cfa_location *loc1, const dw_cfa_location *loc2)
599 {
600   return (loc1->reg == loc2->reg
601 	  && loc1->offset == loc2->offset
602 	  && loc1->indirect == loc2->indirect
603 	  && (loc1->indirect == 0
604 	      || loc1->base_offset == loc2->base_offset));
605 }
606 
/* Determine if two CFI operands are identical.  */

static bool
cfi_oprnd_equal_p (enum dw_cfi_oprnd_type t, dw_cfi_oprnd *a, dw_cfi_oprnd *b)
{
  switch (t)
    {
    case dw_cfi_oprnd_unused:
      return true;
    case dw_cfi_oprnd_reg_num:
      return a->dw_cfi_reg_num == b->dw_cfi_reg_num;
    case dw_cfi_oprnd_offset:
      return a->dw_cfi_offset == b->dw_cfi_offset;
    case dw_cfi_oprnd_addr:
      /* Pointer equality covers two references to the same label
	 string; otherwise compare the label names.  */
      return (a->dw_cfi_addr == b->dw_cfi_addr
	      || strcmp (a->dw_cfi_addr, b->dw_cfi_addr) == 0);
    case dw_cfi_oprnd_loc:
      return loc_descr_equal_p (a->dw_cfi_loc, b->dw_cfi_loc);
    }
  gcc_unreachable ();
}
628 
629 /* Determine if two CFI entries are identical.  */
630 
631 static bool
632 cfi_equal_p (dw_cfi_ref a, dw_cfi_ref b)
633 {
634   enum dwarf_call_frame_info opc;
635 
636   /* Make things easier for our callers, including missing operands.  */
637   if (a == b)
638     return true;
639   if (a == NULL || b == NULL)
640     return false;
641 
642   /* Obviously, the opcodes must match.  */
643   opc = a->dw_cfi_opc;
644   if (opc != b->dw_cfi_opc)
645     return false;
646 
647   /* Compare the two operands, re-using the type of the operands as
648      already exposed elsewhere.  */
649   return (cfi_oprnd_equal_p (dw_cfi_oprnd1_desc (opc),
650 			     &a->dw_cfi_oprnd1, &b->dw_cfi_oprnd1)
651 	  && cfi_oprnd_equal_p (dw_cfi_oprnd2_desc (opc),
652 				&a->dw_cfi_oprnd2, &b->dw_cfi_oprnd2));
653 }
654 
/* Determine if two CFI_ROW structures are identical.  */

static bool
cfi_row_equal_p (dw_cfi_row *a, dw_cfi_row *b)
{
  size_t i, n_a, n_b, n_max;

  /* NOTE(review): only A's cfa_cfi is tested for presence here; if B
     alone carried a CFA expression we would fall back to comparing the
     plain CFA fields.  Presumably the two representations are kept in
     sync by all producers — confirm.  */
  if (a->cfa_cfi)
    {
      if (!cfi_equal_p (a->cfa_cfi, b->cfa_cfi))
	return false;
    }
  else if (!cfa_equal_p (&a->cfa, &b->cfa))
    return false;

  /* The register-save vectors may differ in length; trailing entries
     that are missing on one side compare as NULL ("not saved").  */
  n_a = vec_safe_length (a->reg_save);
  n_b = vec_safe_length (b->reg_save);
  n_max = MAX (n_a, n_b);

  for (i = 0; i < n_max; ++i)
    {
      dw_cfi_ref r_a = NULL, r_b = NULL;

      if (i < n_a)
	r_a = (*a->reg_save)[i];
      if (i < n_b)
	r_b = (*b->reg_save)[i];

      if (!cfi_equal_p (r_a, r_b))
        return false;
    }

  return true;
}
689 
/* The CFA is now calculated from NEW_CFA.  Consider OLD_CFA in determining
   what opcode to emit.  Returns the CFI opcode to effect the change, or
   NULL if NEW_CFA == OLD_CFA.  */

static dw_cfi_ref
def_cfa_0 (dw_cfa_location *old_cfa, dw_cfa_location *new_cfa)
{
  dw_cfi_ref cfi;

  /* If nothing changed, no need to issue any call frame instructions.  */
  if (cfa_equal_p (old_cfa, new_cfa))
    return NULL;

  cfi = new_cfi ();

  /* The cases below are ordered from the most compact encoding to the
     most general one.  */
  if (new_cfa->reg == old_cfa->reg && !new_cfa->indirect && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_offset <offset>" instruction, indicating
	 the CFA register did not change but the offset did.  The data
	 factoring for DW_CFA_def_cfa_offset_sf happens in output_cfi, or
	 in the assembler via the .cfi_def_cfa_offset directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa_offset;
      cfi->dw_cfi_oprnd1.dw_cfi_offset = new_cfa->offset;
    }
  else if (new_cfa->offset == old_cfa->offset
	   && old_cfa->reg != INVALID_REGNUM
	   && !new_cfa->indirect
	   && !old_cfa->indirect)
    {
      /* Construct a "DW_CFA_def_cfa_register <register>" instruction,
	 indicating the CFA register has changed to <register> but the
	 offset has not changed.  */
      cfi->dw_cfi_opc = DW_CFA_def_cfa_register;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
    }
  else if (new_cfa->indirect == 0)
    {
      /* Construct a "DW_CFA_def_cfa <register> <offset>" instruction,
	 indicating the CFA register has changed to <register> with
	 the specified offset.  The data factoring for DW_CFA_def_cfa_sf
	 happens in output_cfi, or in the assembler via the .cfi_def_cfa
	 directive.  */
      if (new_cfa->offset < 0)
	cfi->dw_cfi_opc = DW_CFA_def_cfa_sf;
      else
	cfi->dw_cfi_opc = DW_CFA_def_cfa;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = new_cfa->reg;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = new_cfa->offset;
    }
  else
    {
      /* Construct a DW_CFA_def_cfa_expression instruction to
	 calculate the CFA using a full location expression since no
	 register-offset pair is available.  */
      struct dw_loc_descr_struct *loc_list;

      cfi->dw_cfi_opc = DW_CFA_def_cfa_expression;
      loc_list = build_cfa_loc (new_cfa, 0);
      cfi->dw_cfi_oprnd1.dw_cfi_loc = loc_list;
    }

  return cfi;
}
756 
757 /* Similarly, but take OLD_CFA from CUR_ROW, and update it after the fact.  */
758 
759 static void
760 def_cfa_1 (dw_cfa_location *new_cfa)
761 {
762   dw_cfi_ref cfi;
763 
764   if (cur_trace->cfa_store.reg == new_cfa->reg && new_cfa->indirect == 0)
765     cur_trace->cfa_store.offset = new_cfa->offset;
766 
767   cfi = def_cfa_0 (&cur_row->cfa, new_cfa);
768   if (cfi)
769     {
770       cur_row->cfa = *new_cfa;
771       cur_row->cfa_cfi = (cfi->dw_cfi_opc == DW_CFA_def_cfa_expression
772 			  ? cfi : NULL);
773 
774       add_cfi (cfi);
775     }
776 }
777 
/* Add the CFI for saving a register.  REG is the CFA column number.
   If SREG is -1, the register is saved at OFFSET from the CFA;
   otherwise it is saved in SREG.  */

static void
reg_save (unsigned int reg, unsigned int sreg, HOST_WIDE_INT offset)
{
  dw_fde_ref fde = cfun ? cfun->fde : NULL;
  dw_cfi_ref cfi = new_cfi ();

  cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;

  /* When stack is aligned, store REG using DW_CFA_expression with FP.  */
  if (fde
      && fde->stack_realign
      && sreg == INVALID_REGNUM)
    {
      cfi->dw_cfi_opc = DW_CFA_expression;
      cfi->dw_cfi_oprnd1.dw_cfi_reg_num = reg;
      cfi->dw_cfi_oprnd2.dw_cfi_loc
	= build_cfa_aligned_loc (&cur_row->cfa, offset,
				 fde->stack_realignment);
    }
  else if (sreg == INVALID_REGNUM)
    {
      /* Saved at an offset from the CFA: pick the smallest opcode able
	 to represent the column and the (data-aligned) offset.  */
      if (need_data_align_sf_opcode (offset))
	cfi->dw_cfi_opc = DW_CFA_offset_extended_sf;
      else if (reg & ~0x3f)
	cfi->dw_cfi_opc = DW_CFA_offset_extended;
      else
	cfi->dw_cfi_opc = DW_CFA_offset;
      cfi->dw_cfi_oprnd2.dw_cfi_offset = offset;
    }
  else if (sreg == reg)
    {
      /* While we could emit something like DW_CFA_same_value or
	 DW_CFA_restore, we never expect to see something like that
	 in a prologue.  This is more likely to be a bug.  A backend
	 can always bypass this by using REG_CFA_RESTORE directly.  */
      gcc_unreachable ();
    }
  else
    {
      /* Saved in another register.  */
      cfi->dw_cfi_opc = DW_CFA_register;
      cfi->dw_cfi_oprnd2.dw_cfi_reg_num = sreg;
    }

  add_cfi (cfi);
  update_row_reg_save (cur_row, reg, cfi);
}
828 
/* A subroutine of scan_trace.  Check INSN for a REG_ARGS_SIZE note
   and adjust data structures to match.  */

static void
notice_args_size (rtx insn)
{
  HOST_WIDE_INT args_size, delta;
  rtx note;

  note = find_reg_note (insn, REG_ARGS_SIZE, NULL);
  if (note == NULL)
    return;

  args_size = INTVAL (XEXP (note, 0));
  delta = args_size - cur_trace->end_true_args_size;
  /* No change in the pushed-argument size; nothing to update.  */
  if (delta == 0)
    return;

  cur_trace->end_true_args_size = args_size;

  /* If the CFA is computed off the stack pointer, then we must adjust
     the computation of the CFA as well.  */
  if (cur_cfa->reg == dw_stack_pointer_regnum)
    {
      gcc_assert (!cur_cfa->indirect);

      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
#ifndef STACK_GROWS_DOWNWARD
      delta = -delta;
#endif
      cur_cfa->offset += delta;
    }
}
863 
864 /* A subroutine of scan_trace.  INSN is can_throw_internal.  Update the
865    data within the trace related to EH insns and args_size.  */
866 
867 static void
868 notice_eh_throw (rtx insn)
869 {
870   HOST_WIDE_INT args_size;
871 
872   args_size = cur_trace->end_true_args_size;
873   if (cur_trace->eh_head == NULL)
874     {
875       cur_trace->eh_head = insn;
876       cur_trace->beg_delay_args_size = args_size;
877       cur_trace->end_delay_args_size = args_size;
878     }
879   else if (cur_trace->end_delay_args_size != args_size)
880     {
881       cur_trace->end_delay_args_size = args_size;
882 
883       /* ??? If the CFA is the stack pointer, search backward for the last
884 	 CFI note and insert there.  Given that the stack changed for the
885 	 args_size change, there *must* be such a note in between here and
886 	 the last eh insn.  */
887       add_cfi_args_size (args_size);
888     }
889 }
890 
/* Short-hand inline for the very common D_F_R (REGNO (x)) operation.  */
/* ??? This ought to go into dwarf2out.h, except that dwarf2out.h is
   used in places where rtl is prohibited.  */

static inline unsigned
dwf_regno (const_rtx reg)
{
  /* Map a hard register rtx to its DWARF frame column number.  */
  return DWARF_FRAME_REGNUM (REGNO (reg));
}
900 
901 /* Compare X and Y for equivalence.  The inputs may be REGs or PC_RTX.  */
902 
903 static bool
904 compare_reg_or_pc (rtx x, rtx y)
905 {
906   if (REG_P (x) && REG_P (y))
907     return REGNO (x) == REGNO (y);
908   return x == y;
909 }
910 
/* Record SRC as being saved in DEST.  DEST may be null to delete an
   existing entry.  SRC may be a register or PC_RTX.  */

static void
record_reg_saved_in_reg (rtx dest, rtx src)
{
  reg_saved_in_data *elt;
  size_t i;

  /* If SRC already has an entry, update or remove it in place.  */
  FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, elt)
    if (compare_reg_or_pc (elt->orig_reg, src))
      {
	if (dest == NULL)
	  cur_trace->regs_saved_in_regs.unordered_remove (i);
	else
	  elt->saved_in_reg = dest;
	return;
      }

  /* Deleting a register with no entry is a no-op.  */
  if (dest == NULL)
    return;

  reg_saved_in_data e = {src, dest};
  cur_trace->regs_saved_in_regs.safe_push (e);
}
936 
937 /* Add an entry to QUEUED_REG_SAVES saying that REG is now saved at
938    SREG, or if SREG is NULL then it is saved at OFFSET to the CFA.  */
939 
940 static void
941 queue_reg_save (rtx reg, rtx sreg, HOST_WIDE_INT offset)
942 {
943   queued_reg_save *q;
944   queued_reg_save e = {reg, sreg, offset};
945   size_t i;
946 
947   /* Duplicates waste space, but it's also necessary to remove them
948      for correctness, since the queue gets output in reverse order.  */
949   FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
950     if (compare_reg_or_pc (q->reg, reg))
951       {
952 	*q = e;
953 	return;
954       }
955 
956   queued_reg_saves.safe_push (e);
957 }
958 
/* Output all the entries in QUEUED_REG_SAVES.  */

static void
dwarf2out_flush_queued_reg_saves (void)
{
  queued_reg_save *q;
  size_t i;

  FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
    {
      unsigned int reg, sreg;

      /* Keep the register-in-register tracking in sync with what we
	 are about to emit.  */
      record_reg_saved_in_reg (q->saved_reg, q->reg);

      /* PC_RTX stands for the return-address column.  */
      if (q->reg == pc_rtx)
	reg = DWARF_FRAME_RETURN_COLUMN;
      else
        reg = dwf_regno (q->reg);
      if (q->saved_reg)
	sreg = dwf_regno (q->saved_reg);
      else
	sreg = INVALID_REGNUM;
      reg_save (reg, sreg, q->cfa_offset);
    }

  queued_reg_saves.truncate (0);
}
986 
987 /* Does INSN clobber any register which QUEUED_REG_SAVES lists a saved
988    location for?  Or, does it clobber a register which we've previously
989    said that some other register is saved in, and for which we now
990    have a new location for?  */
991 
992 static bool
993 clobbers_queued_reg_save (const_rtx insn)
994 {
995   queued_reg_save *q;
996   size_t iq;
997 
998   FOR_EACH_VEC_ELT (queued_reg_saves, iq, q)
999     {
1000       size_t ir;
1001       reg_saved_in_data *rir;
1002 
1003       if (modified_in_p (q->reg, insn))
1004 	return true;
1005 
1006       FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, ir, rir)
1007 	if (compare_reg_or_pc (q->reg, rir->orig_reg)
1008 	    && modified_in_p (rir->saved_in_reg, insn))
1009 	  return true;
1010     }
1011 
1012   return false;
1013 }
1014 
1015 /* What register, if any, is currently saved in REG?  */
1016 
1017 static rtx
1018 reg_saved_in (rtx reg)
1019 {
1020   unsigned int regn = REGNO (reg);
1021   queued_reg_save *q;
1022   reg_saved_in_data *rir;
1023   size_t i;
1024 
1025   FOR_EACH_VEC_ELT (queued_reg_saves, i, q)
1026     if (q->saved_reg && regn == REGNO (q->saved_reg))
1027       return q->reg;
1028 
1029   FOR_EACH_VEC_ELT (cur_trace->regs_saved_in_regs, i, rir)
1030     if (regn == REGNO (rir->saved_in_reg))
1031       return rir->orig_reg;
1032 
1033   return NULL_RTX;
1034 }
1035 
1036 /* A subroutine of dwarf2out_frame_debug, process a REG_DEF_CFA note.  */
1037 
1038 static void
1039 dwarf2out_frame_debug_def_cfa (rtx pat)
1040 {
1041   memset (cur_cfa, 0, sizeof (*cur_cfa));
1042 
1043   if (GET_CODE (pat) == PLUS)
1044     {
1045       cur_cfa->offset = INTVAL (XEXP (pat, 1));
1046       pat = XEXP (pat, 0);
1047     }
1048   if (MEM_P (pat))
1049     {
1050       cur_cfa->indirect = 1;
1051       pat = XEXP (pat, 0);
1052       if (GET_CODE (pat) == PLUS)
1053 	{
1054 	  cur_cfa->base_offset = INTVAL (XEXP (pat, 1));
1055 	  pat = XEXP (pat, 0);
1056 	}
1057     }
1058   /* ??? If this fails, we could be calling into the _loc functions to
1059      define a full expression.  So far no port does that.  */
1060   gcc_assert (REG_P (pat));
1061   cur_cfa->reg = dwf_regno (pat);
1062 }
1063 
1064 /* A subroutine of dwarf2out_frame_debug, process a REG_ADJUST_CFA note.  */
1065 
1066 static void
1067 dwarf2out_frame_debug_adjust_cfa (rtx pat)
1068 {
1069   rtx src, dest;
1070 
1071   gcc_assert (GET_CODE (pat) == SET);
1072   dest = XEXP (pat, 0);
1073   src = XEXP (pat, 1);
1074 
1075   switch (GET_CODE (src))
1076     {
1077     case PLUS:
1078       gcc_assert (dwf_regno (XEXP (src, 0)) == cur_cfa->reg);
1079       cur_cfa->offset -= INTVAL (XEXP (src, 1));
1080       break;
1081 
1082     case REG:
1083       break;
1084 
1085     default:
1086       gcc_unreachable ();
1087     }
1088 
1089   cur_cfa->reg = dwf_regno (dest);
1090   gcc_assert (cur_cfa->indirect == 0);
1091 }
1092 
1093 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_OFFSET note.  */
1094 
1095 static void
1096 dwarf2out_frame_debug_cfa_offset (rtx set)
1097 {
1098   HOST_WIDE_INT offset;
1099   rtx src, addr, span;
1100   unsigned int sregno;
1101 
1102   src = XEXP (set, 1);
1103   addr = XEXP (set, 0);
1104   gcc_assert (MEM_P (addr));
1105   addr = XEXP (addr, 0);
1106 
1107   /* As documented, only consider extremely simple addresses.  */
1108   switch (GET_CODE (addr))
1109     {
1110     case REG:
1111       gcc_assert (dwf_regno (addr) == cur_cfa->reg);
1112       offset = -cur_cfa->offset;
1113       break;
1114     case PLUS:
1115       gcc_assert (dwf_regno (XEXP (addr, 0)) == cur_cfa->reg);
1116       offset = INTVAL (XEXP (addr, 1)) - cur_cfa->offset;
1117       break;
1118     default:
1119       gcc_unreachable ();
1120     }
1121 
1122   if (src == pc_rtx)
1123     {
1124       span = NULL;
1125       sregno = DWARF_FRAME_RETURN_COLUMN;
1126     }
1127   else
1128     {
1129       span = targetm.dwarf_register_span (src);
1130       sregno = dwf_regno (src);
1131     }
1132 
1133   /* ??? We'd like to use queue_reg_save, but we need to come up with
1134      a different flushing heuristic for epilogues.  */
1135   if (!span)
1136     reg_save (sregno, INVALID_REGNUM, offset);
1137   else
1138     {
1139       /* We have a PARALLEL describing where the contents of SRC live.
1140    	 Queue register saves for each piece of the PARALLEL.  */
1141       int par_index;
1142       int limit;
1143       HOST_WIDE_INT span_offset = offset;
1144 
1145       gcc_assert (GET_CODE (span) == PARALLEL);
1146 
1147       limit = XVECLEN (span, 0);
1148       for (par_index = 0; par_index < limit; par_index++)
1149 	{
1150 	  rtx elem = XVECEXP (span, 0, par_index);
1151 
1152 	  sregno = dwf_regno (src);
1153 	  reg_save (sregno, INVALID_REGNUM, span_offset);
1154 	  span_offset += GET_MODE_SIZE (GET_MODE (elem));
1155 	}
1156     }
1157 }
1158 
1159 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_REGISTER note.  */
1160 
1161 static void
1162 dwarf2out_frame_debug_cfa_register (rtx set)
1163 {
1164   rtx src, dest;
1165   unsigned sregno, dregno;
1166 
1167   src = XEXP (set, 1);
1168   dest = XEXP (set, 0);
1169 
1170   record_reg_saved_in_reg (dest, src);
1171   if (src == pc_rtx)
1172     sregno = DWARF_FRAME_RETURN_COLUMN;
1173   else
1174     sregno = dwf_regno (src);
1175 
1176   dregno = dwf_regno (dest);
1177 
1178   /* ??? We'd like to use queue_reg_save, but we need to come up with
1179      a different flushing heuristic for epilogues.  */
1180   reg_save (sregno, dregno, 0);
1181 }
1182 
1183 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_EXPRESSION note. */
1184 
1185 static void
1186 dwarf2out_frame_debug_cfa_expression (rtx set)
1187 {
1188   rtx src, dest, span;
1189   dw_cfi_ref cfi = new_cfi ();
1190   unsigned regno;
1191 
1192   dest = SET_DEST (set);
1193   src = SET_SRC (set);
1194 
1195   gcc_assert (REG_P (src));
1196   gcc_assert (MEM_P (dest));
1197 
1198   span = targetm.dwarf_register_span (src);
1199   gcc_assert (!span);
1200 
1201   regno = dwf_regno (src);
1202 
1203   cfi->dw_cfi_opc = DW_CFA_expression;
1204   cfi->dw_cfi_oprnd1.dw_cfi_reg_num = regno;
1205   cfi->dw_cfi_oprnd2.dw_cfi_loc
1206     = mem_loc_descriptor (XEXP (dest, 0), get_address_mode (dest),
1207 			  GET_MODE (dest), VAR_INIT_STATUS_INITIALIZED);
1208 
1209   /* ??? We'd like to use queue_reg_save, were the interface different,
1210      and, as above, we could manage flushing for epilogues.  */
1211   add_cfi (cfi);
1212   update_row_reg_save (cur_row, regno, cfi);
1213 }
1214 
1215 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_RESTORE note.  */
1216 
1217 static void
1218 dwarf2out_frame_debug_cfa_restore (rtx reg)
1219 {
1220   unsigned int regno = dwf_regno (reg);
1221 
1222   add_cfi_restore (regno);
1223   update_row_reg_save (cur_row, regno, NULL);
1224 }
1225 
1226 /* A subroutine of dwarf2out_frame_debug, process a REG_CFA_WINDOW_SAVE.
1227    ??? Perhaps we should note in the CIE where windows are saved (instead of
1228    assuming 0(cfa)) and what registers are in the window.  */
1229 
1230 static void
1231 dwarf2out_frame_debug_cfa_window_save (void)
1232 {
1233   dw_cfi_ref cfi = new_cfi ();
1234 
1235   cfi->dw_cfi_opc = DW_CFA_GNU_window_save;
1236   add_cfi (cfi);
1237 }
1238 
1239 /* Record call frame debugging information for an expression EXPR,
1240    which either sets SP or FP (adjusting how we calculate the frame
1241    address) or saves a register to the stack or another register.
1242    LABEL indicates the address of EXPR.
1243 
1244    This function encodes a state machine mapping rtxes to actions on
1245    cfa, cfa_store, and cfa_temp.reg.  We describe these rules so
1246    users need not read the source code.
1247 
1248   The High-Level Picture
1249 
1250   Changes in the register we use to calculate the CFA: Currently we
1251   assume that if you copy the CFA register into another register, we
1252   should take the other one as the new CFA register; this seems to
1253   work pretty well.  If it's wrong for some target, it's simple
1254   enough not to set RTX_FRAME_RELATED_P on the insn in question.
1255 
1256   Changes in the register we use for saving registers to the stack:
1257   This is usually SP, but not always.  Again, we deduce that if you
1258   copy SP into another register (and SP is not the CFA register),
1259   then the new register is the one we will be using for register
1260   saves.  This also seems to work.
1261 
1262   Register saves: There's not much guesswork about this one; if
1263   RTX_FRAME_RELATED_P is set on an insn which modifies memory, it's a
1264   register save, and the register used to calculate the destination
1265   had better be the one we think we're using for this purpose.
1266   It's also assumed that a copy from a call-saved register to another
1267   register is saving that register if RTX_FRAME_RELATED_P is set on
1268   that instruction.  If the copy is from a call-saved register to
1269   the *same* register, that means that the register is now the same
1270   value as in the caller.
1271 
1272   Except: If the register being saved is the CFA register, and the
1273   offset is nonzero, we are saving the CFA, so we assume we have to
1274   use DW_CFA_def_cfa_expression.  If the offset is 0, we assume that
1275   the intent is to save the value of SP from the previous frame.
1276 
  In addition, if a register has previously been saved to a different
  register, later references to it consult that recorded association
  (see reg_saved_in) so the save is attributed to the original register.
1279 
1280   Invariants / Summaries of Rules
1281 
1282   cfa	       current rule for calculating the CFA.  It usually
1283 	       consists of a register and an offset.  This is
1284 	       actually stored in *cur_cfa, but abbreviated
1285 	       for the purposes of this documentation.
1286   cfa_store    register used by prologue code to save things to the stack
1287 	       cfa_store.offset is the offset from the value of
1288 	       cfa_store.reg to the actual CFA
1289   cfa_temp     register holding an integral value.  cfa_temp.offset
1290 	       stores the value, which will be used to adjust the
1291 	       stack pointer.  cfa_temp is also used like cfa_store,
1292 	       to track stores to the stack via fp or a temp reg.
1293 
1294   Rules  1- 4: Setting a register's value to cfa.reg or an expression
1295 	       with cfa.reg as the first operand changes the cfa.reg and its
1296 	       cfa.offset.  Rule 1 and 4 also set cfa_temp.reg and
1297 	       cfa_temp.offset.
1298 
1299   Rules  6- 9: Set a non-cfa.reg register value to a constant or an
1300 	       expression yielding a constant.  This sets cfa_temp.reg
1301 	       and cfa_temp.offset.
1302 
1303   Rule 5:      Create a new register cfa_store used to save items to the
1304 	       stack.
1305 
1306   Rules 10-14: Save a register to the stack.  Define offset as the
1307 	       difference of the original location and cfa_store's
1308 	       location (or cfa_temp's location if cfa_temp is used).
1309 
  Rules 16-19: If AND operation happens on sp in prologue, we assume
	       stack is realigned.  We will use a group of DW_OP_XXX
	       expressions to represent the location of the stored
	       register instead of CFA+offset.
1314 
1315   The Rules
1316 
1317   "{a,b}" indicates a choice of a xor b.
1318   "<reg>:cfa.reg" indicates that <reg> must equal cfa.reg.
1319 
1320   Rule 1:
1321   (set <reg1> <reg2>:cfa.reg)
1322   effects: cfa.reg = <reg1>
1323 	   cfa.offset unchanged
1324 	   cfa_temp.reg = <reg1>
1325 	   cfa_temp.offset = cfa.offset
1326 
1327   Rule 2:
1328   (set sp ({minus,plus,losum} {sp,fp}:cfa.reg
1329 			      {<const_int>,<reg>:cfa_temp.reg}))
1330   effects: cfa.reg = sp if fp used
1331 	   cfa.offset += {+/- <const_int>, cfa_temp.offset} if cfa.reg==sp
1332 	   cfa_store.offset += {+/- <const_int>, cfa_temp.offset}
1333 	     if cfa_store.reg==sp
1334 
1335   Rule 3:
1336   (set fp ({minus,plus,losum} <reg>:cfa.reg <const_int>))
1337   effects: cfa.reg = fp
	   cfa.offset += +/- <const_int>
1339 
1340   Rule 4:
1341   (set <reg1> ({plus,losum} <reg2>:cfa.reg <const_int>))
1342   constraints: <reg1> != fp
1343 	       <reg1> != sp
1344   effects: cfa.reg = <reg1>
1345 	   cfa_temp.reg = <reg1>
1346 	   cfa_temp.offset = cfa.offset
1347 
1348   Rule 5:
1349   (set <reg1> (plus <reg2>:cfa_temp.reg sp:cfa.reg))
1350   constraints: <reg1> != fp
1351 	       <reg1> != sp
1352   effects: cfa_store.reg = <reg1>
1353 	   cfa_store.offset = cfa.offset - cfa_temp.offset
1354 
1355   Rule 6:
1356   (set <reg> <const_int>)
1357   effects: cfa_temp.reg = <reg>
1358 	   cfa_temp.offset = <const_int>
1359 
1360   Rule 7:
1361   (set <reg1>:cfa_temp.reg (ior <reg2>:cfa_temp.reg <const_int>))
1362   effects: cfa_temp.reg = <reg1>
1363 	   cfa_temp.offset |= <const_int>
1364 
1365   Rule 8:
1366   (set <reg> (high <exp>))
1367   effects: none
1368 
1369   Rule 9:
1370   (set <reg> (lo_sum <exp> <const_int>))
1371   effects: cfa_temp.reg = <reg>
1372 	   cfa_temp.offset = <const_int>
1373 
1374   Rule 10:
1375   (set (mem ({pre,post}_modify sp:cfa_store (???? <reg1> <const_int>))) <reg2>)
1376   effects: cfa_store.offset -= <const_int>
1377 	   cfa.offset = cfa_store.offset if cfa.reg == sp
1378 	   cfa.reg = sp
1379 	   cfa.base_offset = -cfa_store.offset
1380 
1381   Rule 11:
1382   (set (mem ({pre_inc,pre_dec,post_dec} sp:cfa_store.reg)) <reg>)
1383   effects: cfa_store.offset += -/+ mode_size(mem)
1384 	   cfa.offset = cfa_store.offset if cfa.reg == sp
1385 	   cfa.reg = sp
1386 	   cfa.base_offset = -cfa_store.offset
1387 
1388   Rule 12:
1389   (set (mem ({minus,plus,losum} <reg1>:{cfa_store,cfa_temp} <const_int>))
1390 
1391        <reg2>)
1392   effects: cfa.reg = <reg1>
1393 	   cfa.base_offset = -/+ <const_int> - {cfa_store,cfa_temp}.offset
1394 
1395   Rule 13:
1396   (set (mem <reg1>:{cfa_store,cfa_temp}) <reg2>)
1397   effects: cfa.reg = <reg1>
1398 	   cfa.base_offset = -{cfa_store,cfa_temp}.offset
1399 
1400   Rule 14:
1401   (set (mem (post_inc <reg1>:cfa_temp <const_int>)) <reg2>)
1402   effects: cfa.reg = <reg1>
1403 	   cfa.base_offset = -cfa_temp.offset
1404 	   cfa_temp.offset -= mode_size(mem)
1405 
1406   Rule 15:
1407   (set <reg> {unspec, unspec_volatile})
1408   effects: target-dependent
1409 
1410   Rule 16:
1411   (set sp (and: sp <const_int>))
1412   constraints: cfa_store.reg == sp
1413   effects: cfun->fde.stack_realign = 1
1414            cfa_store.offset = 0
1415 	   fde->drap_reg = cfa.reg if cfa.reg != sp and cfa.reg != fp
1416 
1417   Rule 17:
1418   (set (mem ({pre_inc, pre_dec} sp)) (mem (plus (cfa.reg) (const_int))))
1419   effects: cfa_store.offset += -/+ mode_size(mem)
1420 
1421   Rule 18:
1422   (set (mem ({pre_inc, pre_dec} sp)) fp)
1423   constraints: fde->stack_realign == 1
1424   effects: cfa_store.offset = 0
1425 	   cfa.reg != HARD_FRAME_POINTER_REGNUM
1426 
1427   Rule 19:
1428   (set (mem ({pre_inc, pre_dec} sp)) cfa.reg)
1429   constraints: fde->stack_realign == 1
1430                && cfa.offset == 0
1431                && cfa.indirect == 0
1432                && cfa.reg != HARD_FRAME_POINTER_REGNUM
1433   effects: Use DW_CFA_def_cfa_expression to define cfa
1434   	   cfa.reg == fde->drap_reg  */
1435 
static void
dwarf2out_frame_debug_expr (rtx expr)
{
  rtx src, dest, span;
  HOST_WIDE_INT offset;
  dw_fde_ref fde;

  /* If RTX_FRAME_RELATED_P is set on a PARALLEL, process each member of
     the PARALLEL independently. The first element is always processed if
     it is a SET. This is for backward compatibility.   Other elements
     are processed only if they are SETs and the RTX_FRAME_RELATED_P
     flag is set in them.  */
  if (GET_CODE (expr) == PARALLEL || GET_CODE (expr) == SEQUENCE)
    {
      int par_index;
      int limit = XVECLEN (expr, 0);
      rtx elem;

      /* PARALLELs have strict read-modify-write semantics, so we
	 ought to evaluate every rvalue before changing any lvalue.
	 It's cumbersome to do that in general, but there's an
	 easy approximation that is enough for all current users:
	 handle register saves before register assignments.  */
      if (GET_CODE (expr) == PARALLEL)
	for (par_index = 0; par_index < limit; par_index++)
	  {
	    elem = XVECEXP (expr, 0, par_index);
	    if (GET_CODE (elem) == SET
		&& MEM_P (SET_DEST (elem))
		&& (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	      dwarf2out_frame_debug_expr (elem);
	  }

      for (par_index = 0; par_index < limit; par_index++)
	{
	  elem = XVECEXP (expr, 0, par_index);
	  if (GET_CODE (elem) == SET
	      && (!MEM_P (SET_DEST (elem)) || GET_CODE (expr) == SEQUENCE)
	      && (RTX_FRAME_RELATED_P (elem) || par_index == 0))
	    dwarf2out_frame_debug_expr (elem);
	}
      return;
    }

  gcc_assert (GET_CODE (expr) == SET);

  src = SET_SRC (expr);
  dest = SET_DEST (expr);

  if (REG_P (src))
    {
      /* If SRC currently holds the saved value of some other register,
	 treat this as an operation on that original register.  */
      rtx rsi = reg_saved_in (src);
      if (rsi)
	src = rsi;
    }

  /* May be NULL; guarded by "if (fde ...)" tests below.  */
  fde = cfun->fde;

  switch (GET_CODE (dest))
    {
    case REG:
      switch (GET_CODE (src))
	{
	  /* Setting FP from SP.  */
	case REG:
	  if (cur_cfa->reg == dwf_regno (src))
	    {
	      /* Rule 1 */
	      /* Update the CFA rule wrt SP or FP.  Make sure src is
		 relative to the current CFA register.

		 We used to require that dest be either SP or FP, but the
		 ARM copies SP to a temporary register, and from there to
		 FP.  So we just rely on the backends to only set
		 RTX_FRAME_RELATED_P on appropriate insns.  */
	      cur_cfa->reg = dwf_regno (dest);
	      cur_trace->cfa_temp.reg = cur_cfa->reg;
	      cur_trace->cfa_temp.offset = cur_cfa->offset;
	    }
	  else
	    {
	      /* Saving a register in a register.  */
	      gcc_assert (!fixed_regs [REGNO (dest)]
			  /* For the SPARC and its register window.  */
			  || (dwf_regno (src) == DWARF_FRAME_RETURN_COLUMN));

              /* After stack is aligned, we can only save SP in FP
		 if drap register is used.  In this case, we have
		 to restore stack pointer with the CFA value and we
		 don't generate this DWARF information.  */
	      if (fde
		  && fde->stack_realign
		  && REGNO (src) == STACK_POINTER_REGNUM)
		gcc_assert (REGNO (dest) == HARD_FRAME_POINTER_REGNUM
			    && fde->drap_reg != INVALID_REGNUM
			    && cur_cfa->reg != dwf_regno (src));
	      else
		queue_reg_save (src, dest, 0);
	    }
	  break;

	case PLUS:
	case MINUS:
	case LO_SUM:
	  if (dest == stack_pointer_rtx)
	    {
	      /* Rule 2 */
	      /* Adjusting SP.  */
	      switch (GET_CODE (XEXP (src, 1)))
		{
		case CONST_INT:
		  offset = INTVAL (XEXP (src, 1));
		  break;
		case REG:
		  /* The adjustment amount was previously loaded into
		     cfa_temp by Rule 6/9.  */
		  gcc_assert (dwf_regno (XEXP (src, 1))
			      == cur_trace->cfa_temp.reg);
		  offset = cur_trace->cfa_temp.offset;
		  break;
		default:
		  gcc_unreachable ();
		}

	      if (XEXP (src, 0) == hard_frame_pointer_rtx)
		{
		  /* Restoring SP from FP in the epilogue.  */
		  gcc_assert (cur_cfa->reg == dw_frame_pointer_regnum);
		  cur_cfa->reg = dw_stack_pointer_regnum;
		}
	      else if (GET_CODE (src) == LO_SUM)
		/* Assume we've set the source reg of the LO_SUM from sp.  */
		;
	      else
		gcc_assert (XEXP (src, 0) == stack_pointer_rtx);

	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      if (cur_cfa->reg == dw_stack_pointer_regnum)
		cur_cfa->offset += offset;
	      if (cur_trace->cfa_store.reg == dw_stack_pointer_regnum)
		cur_trace->cfa_store.offset += offset;
	    }
	  else if (dest == hard_frame_pointer_rtx)
	    {
	      /* Rule 3 */
	      /* Either setting the FP from an offset of the SP,
		 or adjusting the FP */
	      gcc_assert (frame_pointer_needed);

	      gcc_assert (REG_P (XEXP (src, 0))
			  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
			  && CONST_INT_P (XEXP (src, 1)));
	      offset = INTVAL (XEXP (src, 1));
	      if (GET_CODE (src) != MINUS)
		offset = -offset;
	      cur_cfa->offset += offset;
	      cur_cfa->reg = dw_frame_pointer_regnum;
	    }
	  else
	    {
	      gcc_assert (GET_CODE (src) != MINUS);

	      /* Rule 4 */
	      if (REG_P (XEXP (src, 0))
		  && dwf_regno (XEXP (src, 0)) == cur_cfa->reg
		  && CONST_INT_P (XEXP (src, 1)))
		{
		  /* Setting a temporary CFA register that will be copied
		     into the FP later on.  */
		  offset = - INTVAL (XEXP (src, 1));
		  cur_cfa->offset += offset;
		  cur_cfa->reg = dwf_regno (dest);
		  /* Or used to save regs to the stack.  */
		  cur_trace->cfa_temp.reg = cur_cfa->reg;
		  cur_trace->cfa_temp.offset = cur_cfa->offset;
		}

	      /* Rule 5 */
	      else if (REG_P (XEXP (src, 0))
		       && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		       && XEXP (src, 1) == stack_pointer_rtx)
		{
		  /* Setting a scratch register that we will use instead
		     of SP for saving registers to the stack.  */
		  gcc_assert (cur_cfa->reg == dw_stack_pointer_regnum);
		  cur_trace->cfa_store.reg = dwf_regno (dest);
		  cur_trace->cfa_store.offset
		    = cur_cfa->offset - cur_trace->cfa_temp.offset;
		}

	      /* Rule 9 */
	      else if (GET_CODE (src) == LO_SUM
		       && CONST_INT_P (XEXP (src, 1)))
		{
		  cur_trace->cfa_temp.reg = dwf_regno (dest);
		  cur_trace->cfa_temp.offset = INTVAL (XEXP (src, 1));
		}
	      else
		gcc_unreachable ();
	    }
	  break;

	  /* Rule 6 */
	case CONST_INT:
	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset = INTVAL (src);
	  break;

	  /* Rule 7 */
	case IOR:
	  gcc_assert (REG_P (XEXP (src, 0))
		      && dwf_regno (XEXP (src, 0)) == cur_trace->cfa_temp.reg
		      && CONST_INT_P (XEXP (src, 1)));

	  cur_trace->cfa_temp.reg = dwf_regno (dest);
	  cur_trace->cfa_temp.offset |= INTVAL (XEXP (src, 1));
	  break;

	  /* Skip over HIGH, assuming it will be followed by a LO_SUM,
	     which will fill in all of the bits.  */
	  /* Rule 8 */
	case HIGH:
	  break;

	  /* Rule 15 */
	case UNSPEC:
	case UNSPEC_VOLATILE:
	  /* All unspecs should be represented by REG_CFA_* notes.  */
	  gcc_unreachable ();
	  return;

	  /* Rule 16 */
	case AND:
          /* If this AND operation happens on stack pointer in prologue,
	     we assume the stack is realigned and we extract the
	     alignment.  */
          if (fde && XEXP (src, 0) == stack_pointer_rtx)
            {
	      /* We interpret reg_save differently with stack_realign set.
		 Thus we must flush whatever we have queued first.  */
	      dwarf2out_flush_queued_reg_saves ();

              gcc_assert (cur_trace->cfa_store.reg
			  == dwf_regno (XEXP (src, 0)));
              fde->stack_realign = 1;
              fde->stack_realignment = INTVAL (XEXP (src, 1));
              cur_trace->cfa_store.offset = 0;

	      /* Remember the CFA register as the DRAP register when it
		 is neither SP nor FP; Rule 19 relies on this.  */
	      if (cur_cfa->reg != dw_stack_pointer_regnum
		  && cur_cfa->reg != dw_frame_pointer_regnum)
		fde->drap_reg = cur_cfa->reg;
            }
          return;

	default:
	  gcc_unreachable ();
	}
      break;

    case MEM:

      /* Saving a register to the stack.  Make sure dest is relative to the
	 CFA register.  */
      switch (GET_CODE (XEXP (dest, 0)))
	{
	  /* Rule 10 */
	  /* With a push.  */
	case PRE_MODIFY:
	case POST_MODIFY:
	  /* We can't handle variable size modifications.  */
	  gcc_assert (GET_CODE (XEXP (XEXP (XEXP (dest, 0), 1), 1))
		      == CONST_INT);
	  offset = -INTVAL (XEXP (XEXP (XEXP (dest, 0), 1), 1));

	  gcc_assert (REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;
	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_MODIFY)
	    offset -= cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 11 */
	case PRE_INC:
	case PRE_DEC:
	case POST_DEC:
	  offset = GET_MODE_SIZE (GET_MODE (dest));
	  if (GET_CODE (XEXP (dest, 0)) == PRE_INC)
	    offset = -offset;

	  gcc_assert ((REGNO (XEXP (XEXP (dest, 0), 0))
		       == STACK_POINTER_REGNUM)
		      && cur_trace->cfa_store.reg == dw_stack_pointer_regnum);

	  cur_trace->cfa_store.offset += offset;

          /* Rule 18: If stack is aligned, we will use FP as a
	     reference to represent the address of the stored
	     register.  */
          if (fde
              && fde->stack_realign
	      && REG_P (src)
	      && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	    {
	      gcc_assert (cur_cfa->reg != dw_frame_pointer_regnum);
	      cur_trace->cfa_store.offset = 0;
	    }

	  if (cur_cfa->reg == dw_stack_pointer_regnum)
	    cur_cfa->offset = cur_trace->cfa_store.offset;

	  if (GET_CODE (XEXP (dest, 0)) == POST_DEC)
	    offset += -cur_trace->cfa_store.offset;
	  else
	    offset = -cur_trace->cfa_store.offset;
	  break;

	  /* Rule 12 */
	  /* With an offset.  */
	case PLUS:
	case MINUS:
	case LO_SUM:
	  {
	    unsigned int regno;

	    gcc_assert (CONST_INT_P (XEXP (XEXP (dest, 0), 1))
			&& REG_P (XEXP (XEXP (dest, 0), 0)));
	    offset = INTVAL (XEXP (XEXP (dest, 0), 1));
	    if (GET_CODE (XEXP (dest, 0)) == MINUS)
	      offset = -offset;

	    regno = dwf_regno (XEXP (XEXP (dest, 0), 0));

	    /* Convert to an offset relative to the CFA, depending on
	       which tracked base register addresses the slot.  */
	    if (cur_cfa->reg == regno)
	      offset -= cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset -= cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset -= cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 13 */
	  /* Without an offset.  */
	case REG:
	  {
	    unsigned int regno = dwf_regno (XEXP (dest, 0));

	    if (cur_cfa->reg == regno)
	      offset = -cur_cfa->offset;
	    else if (cur_trace->cfa_store.reg == regno)
	      offset = -cur_trace->cfa_store.offset;
	    else
	      {
		gcc_assert (cur_trace->cfa_temp.reg == regno);
		offset = -cur_trace->cfa_temp.offset;
	      }
	  }
	  break;

	  /* Rule 14 */
	case POST_INC:
	  gcc_assert (cur_trace->cfa_temp.reg
		      == dwf_regno (XEXP (XEXP (dest, 0), 0)));
	  offset = -cur_trace->cfa_temp.offset;
	  cur_trace->cfa_temp.offset -= GET_MODE_SIZE (GET_MODE (dest));
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Rule 17 */
      /* If the source operand of this MEM operation is a memory,
	 we only care how much stack grew.  */
      if (MEM_P (src))
        break;

      if (REG_P (src)
	  && REGNO (src) != STACK_POINTER_REGNUM
	  && REGNO (src) != HARD_FRAME_POINTER_REGNUM
	  && dwf_regno (src) == cur_cfa->reg)
	{
	  /* We're storing the current CFA reg into the stack.  */

	  if (cur_cfa->offset == 0)
	    {
              /* Rule 19 */
              /* If stack is aligned, putting CFA reg into stack means
		 we can no longer use reg + offset to represent CFA.
		 Here we use DW_CFA_def_cfa_expression instead.  The
		 result of this expression equals to the original CFA
		 value.  */
              if (fde
                  && fde->stack_realign
                  && cur_cfa->indirect == 0
                  && cur_cfa->reg != dw_frame_pointer_regnum)
                {
		  gcc_assert (fde->drap_reg == cur_cfa->reg);

		  cur_cfa->indirect = 1;
		  cur_cfa->reg = dw_frame_pointer_regnum;
		  cur_cfa->base_offset = offset;
		  cur_cfa->offset = 0;

		  fde->drap_reg_saved = 1;
		  break;
                }

	      /* If the source register is exactly the CFA, assume
		 we're saving SP like any other register; this happens
		 on the ARM.  */
	      queue_reg_save (stack_pointer_rtx, NULL_RTX, offset);
	      break;
	    }
	  else
	    {
	      /* Otherwise, we'll need to look in the stack to
		 calculate the CFA.  */
	      rtx x = XEXP (dest, 0);

	      if (!REG_P (x))
		x = XEXP (x, 0);
	      gcc_assert (REG_P (x));

	      cur_cfa->reg = dwf_regno (x);
	      cur_cfa->base_offset = offset;
	      cur_cfa->indirect = 1;
	      break;
	    }
	}

      span = NULL;
      if (REG_P (src))
	span = targetm.dwarf_register_span (src);
      if (!span)
	queue_reg_save (src, NULL_RTX, offset);
      else
	{
	  /* We have a PARALLEL describing where the contents of SRC live.
	     Queue register saves for each piece of the PARALLEL.  */
	  int par_index;
	  int limit;
	  HOST_WIDE_INT span_offset = offset;

	  gcc_assert (GET_CODE (span) == PARALLEL);

	  limit = XVECLEN (span, 0);
	  for (par_index = 0; par_index < limit; par_index++)
	    {
	      rtx elem = XVECEXP (span, 0, par_index);
	      queue_reg_save (elem, NULL_RTX, span_offset);
	      span_offset += GET_MODE_SIZE (GET_MODE (elem));
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
}
1904 
1905 /* Record call frame debugging information for INSN, which either sets
1906    SP or FP (adjusting how we calculate the frame address) or saves a
1907    register to the stack.  */
1908 
1909 static void
1910 dwarf2out_frame_debug (rtx insn)
1911 {
1912   rtx note, n;
1913   bool handled_one = false;
1914 
1915   for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
1916     switch (REG_NOTE_KIND (note))
1917       {
1918       case REG_FRAME_RELATED_EXPR:
1919 	insn = XEXP (note, 0);
1920 	goto do_frame_expr;
1921 
1922       case REG_CFA_DEF_CFA:
1923 	dwarf2out_frame_debug_def_cfa (XEXP (note, 0));
1924 	handled_one = true;
1925 	break;
1926 
1927       case REG_CFA_ADJUST_CFA:
1928 	n = XEXP (note, 0);
1929 	if (n == NULL)
1930 	  {
1931 	    n = PATTERN (insn);
1932 	    if (GET_CODE (n) == PARALLEL)
1933 	      n = XVECEXP (n, 0, 0);
1934 	  }
1935 	dwarf2out_frame_debug_adjust_cfa (n);
1936 	handled_one = true;
1937 	break;
1938 
1939       case REG_CFA_OFFSET:
1940 	n = XEXP (note, 0);
1941 	if (n == NULL)
1942 	  n = single_set (insn);
1943 	dwarf2out_frame_debug_cfa_offset (n);
1944 	handled_one = true;
1945 	break;
1946 
1947       case REG_CFA_REGISTER:
1948 	n = XEXP (note, 0);
1949 	if (n == NULL)
1950 	  {
1951 	    n = PATTERN (insn);
1952 	    if (GET_CODE (n) == PARALLEL)
1953 	      n = XVECEXP (n, 0, 0);
1954 	  }
1955 	dwarf2out_frame_debug_cfa_register (n);
1956 	handled_one = true;
1957 	break;
1958 
1959       case REG_CFA_EXPRESSION:
1960 	n = XEXP (note, 0);
1961 	if (n == NULL)
1962 	  n = single_set (insn);
1963 	dwarf2out_frame_debug_cfa_expression (n);
1964 	handled_one = true;
1965 	break;
1966 
1967       case REG_CFA_RESTORE:
1968 	n = XEXP (note, 0);
1969 	if (n == NULL)
1970 	  {
1971 	    n = PATTERN (insn);
1972 	    if (GET_CODE (n) == PARALLEL)
1973 	      n = XVECEXP (n, 0, 0);
1974 	    n = XEXP (n, 0);
1975 	  }
1976 	dwarf2out_frame_debug_cfa_restore (n);
1977 	handled_one = true;
1978 	break;
1979 
1980       case REG_CFA_SET_VDRAP:
1981 	n = XEXP (note, 0);
1982 	if (REG_P (n))
1983 	  {
1984 	    dw_fde_ref fde = cfun->fde;
1985 	    if (fde)
1986 	      {
1987 		gcc_assert (fde->vdrap_reg == INVALID_REGNUM);
1988 		if (REG_P (n))
1989 		  fde->vdrap_reg = dwf_regno (n);
1990 	      }
1991 	  }
1992 	handled_one = true;
1993 	break;
1994 
1995       case REG_CFA_WINDOW_SAVE:
1996 	dwarf2out_frame_debug_cfa_window_save ();
1997 	handled_one = true;
1998 	break;
1999 
2000       case REG_CFA_FLUSH_QUEUE:
2001 	/* The actual flush happens elsewhere.  */
2002 	handled_one = true;
2003 	break;
2004 
2005       default:
2006 	break;
2007       }
2008 
2009   if (!handled_one)
2010     {
2011       insn = PATTERN (insn);
2012     do_frame_expr:
2013       dwarf2out_frame_debug_expr (insn);
2014 
2015       /* Check again.  A parallel can save and update the same register.
2016          We could probably check just once, here, but this is safer than
2017          removing the check at the start of the function.  */
2018       if (clobbers_queued_reg_save (insn))
2019 	dwarf2out_flush_queued_reg_saves ();
2020     }
2021 }
2022 
2023 /* Emit CFI info to change the state from OLD_ROW to NEW_ROW.  */
2024 
2025 static void
2026 change_cfi_row (dw_cfi_row *old_row, dw_cfi_row *new_row)
2027 {
2028   size_t i, n_old, n_new, n_max;
2029   dw_cfi_ref cfi;
2030 
2031   if (new_row->cfa_cfi && !cfi_equal_p (old_row->cfa_cfi, new_row->cfa_cfi))
2032     add_cfi (new_row->cfa_cfi);
2033   else
2034     {
2035       cfi = def_cfa_0 (&old_row->cfa, &new_row->cfa);
2036       if (cfi)
2037 	add_cfi (cfi);
2038     }
2039 
2040   n_old = vec_safe_length (old_row->reg_save);
2041   n_new = vec_safe_length (new_row->reg_save);
2042   n_max = MAX (n_old, n_new);
2043 
2044   for (i = 0; i < n_max; ++i)
2045     {
2046       dw_cfi_ref r_old = NULL, r_new = NULL;
2047 
2048       if (i < n_old)
2049 	r_old = (*old_row->reg_save)[i];
2050       if (i < n_new)
2051 	r_new = (*new_row->reg_save)[i];
2052 
2053       if (r_old == r_new)
2054 	;
2055       else if (r_new == NULL)
2056 	add_cfi_restore (i);
2057       else if (!cfi_equal_p (r_old, r_new))
2058         add_cfi (r_new);
2059     }
2060 }
2061 
2062 /* Examine CFI and return true if a cfi label and set_loc is needed
2063    beforehand.  Even when generating CFI assembler instructions, we
2064    still have to add the cfi to the list so that lookup_cfa_1 works
2065    later on.  When -g2 and above we even need to force emitting of
2066    CFI labels and add to list a DW_CFA_set_loc for convert_cfa_to_fb_loc_list
2067    purposes.  If we're generating DWARF3 output we use DW_OP_call_frame_cfa
2068    and so don't use convert_cfa_to_fb_loc_list.  */
2069 
2070 static bool
2071 cfi_label_required_p (dw_cfi_ref cfi)
2072 {
2073   if (!dwarf2out_do_cfi_asm ())
2074     return true;
2075 
2076   if (dwarf_version == 2
2077       && debug_info_level > DINFO_LEVEL_TERSE
2078       && (write_symbols == DWARF2_DEBUG
2079 	  || write_symbols == VMS_AND_DWARF2_DEBUG))
2080     {
2081       switch (cfi->dw_cfi_opc)
2082 	{
2083 	case DW_CFA_def_cfa_offset:
2084 	case DW_CFA_def_cfa_offset_sf:
2085 	case DW_CFA_def_cfa_register:
2086 	case DW_CFA_def_cfa:
2087 	case DW_CFA_def_cfa_sf:
2088 	case DW_CFA_def_cfa_expression:
2089 	case DW_CFA_restore_state:
2090 	  return true;
2091 	default:
2092 	  return false;
2093 	}
2094     }
2095   return false;
2096 }
2097 
/* Walk the function, looking for NOTE_INSN_CFI notes.  Add the CFIs to the
   function's FDE, adding CFI labels and set_loc/advance_loc opcodes as
   necessary.  */
static void
add_cfis_to_fde (void)
{
  dw_fde_ref fde = cfun->fde;
  rtx insn, next;
  /* We always start with a function_begin label.  */
  bool first = false;

  for (insn = get_insns (); insn; insn = next)
    {
      next = NEXT_INSN (insn);

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* Record where the cold section's CFIs begin within the FDE.  */
	  fde->dw_fde_switch_cfi_index = vec_safe_length (fde->dw_fde_cfi);
	  /* Don't attempt to advance_loc4 between labels
	     in different sections.  */
	  first = true;
	}

      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
	{
	  bool required = cfi_label_required_p (NOTE_CFI (insn));
	  /* Look ahead past the whole run of CFI notes up to the next
	     active insn or section switch, so that one label (if any)
	     covers all of them; NEXT ends up just past the run.  */
	  while (next)
	    if (NOTE_P (next) && NOTE_KIND (next) == NOTE_INSN_CFI)
	      {
		required |= cfi_label_required_p (NOTE_CFI (next));
		next = NEXT_INSN (next);
	      }
	    else if (active_insn_p (next)
		     || (NOTE_P (next) && (NOTE_KIND (next)
					   == NOTE_INSN_SWITCH_TEXT_SECTIONS)))
	      break;
	    else
	      next = NEXT_INSN (next);
	  if (required)
	    {
	      int num = dwarf2out_cfi_label_num;
	      const char *label = dwarf2out_cfi_label ();
	      dw_cfi_ref xcfi;
	      rtx tmp;

	      /* Set the location counter to the new label.  */
	      xcfi = new_cfi ();
	      /* The first label after a section switch cannot use a
		 delta from the previous label; use set_loc instead.  */
	      xcfi->dw_cfi_opc = (first ? DW_CFA_set_loc
				  : DW_CFA_advance_loc4);
	      xcfi->dw_cfi_oprnd1.dw_cfi_addr = label;
	      vec_safe_push (fde->dw_fde_cfi, xcfi);

	      tmp = emit_note_before (NOTE_INSN_CFI_LABEL, insn);
	      NOTE_LABEL_NUMBER (tmp) = num;
	    }

	  /* Append every CFI in the run [insn, next) to the FDE.  */
	  do
	    {
	      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_CFI)
		vec_safe_push (fde->dw_fde_cfi, NOTE_CFI (insn));
	      insn = NEXT_INSN (insn);
	    }
	  while (insn != next);
	  first = false;
	}
    }
}
2165 
/* If LABEL is the start of a trace, then initialize the state of that
   trace from CUR_TRACE and CUR_ROW.  */

static void
maybe_record_trace_start (rtx start, rtx origin)
{
  dw_trace_info *ti;
  HOST_WIDE_INT args_size;

  /* START must be a known trace head (see create_pseudo_cfg).  */
  ti = get_trace_info (start);
  gcc_assert (ti != NULL);

  if (dump_file)
    {
      fprintf (dump_file, "   saw edge from trace %u to %u (via %s %d)\n",
	       cur_trace->id, ti->id,
	       (origin ? rtx_name[(int) GET_CODE (origin)] : "fallthru"),
	       (origin ? INSN_UID (origin) : 0));
    }

  args_size = cur_trace->end_true_args_size;
  if (ti->beg_row == NULL)
    {
      /* This is the first time we've encountered this trace.  Propagate
	 state across the edge and push the trace onto the work list.  */
      ti->beg_row = copy_cfi_row (cur_row);
      ti->beg_true_args_size = args_size;

      ti->cfa_store = cur_trace->cfa_store;
      ti->cfa_temp = cur_trace->cfa_temp;
      /* Copy, not share: the target trace will mutate its own vector.  */
      ti->regs_saved_in_regs = cur_trace->regs_saved_in_regs.copy ();

      trace_work_list.safe_push (ti);

      if (dump_file)
	fprintf (dump_file, "\tpush trace %u to worklist\n", ti->id);
    }
  else
    {

      /* We ought to have the same state incoming to a given trace no
	 matter how we arrive at the trace.  Anything else means we've
	 got some kind of optimization error.  */
      gcc_checking_assert (cfi_row_equal_p (cur_row, ti->beg_row));

      /* The args_size is allowed to conflict if it isn't actually used.  */
      if (ti->beg_true_args_size != args_size)
	ti->args_size_undefined = true;
    }
}
2216 
/* Similarly, but handle the args_size and CFA reset across EH
   and non-local goto edges.  */

static void
maybe_record_trace_start_abnormal (rtx start, rtx origin)
{
  HOST_WIDE_INT save_args_size, delta;
  dw_cfa_location save_cfa;

  /* With no outgoing args pushed there is nothing to undo; the normal
     propagation applies directly.  */
  save_args_size = cur_trace->end_true_args_size;
  if (save_args_size == 0)
    {
      maybe_record_trace_start (start, origin);
      return;
    }

  /* The abnormal edge lands with the pushed arguments popped: record
     the trace start as if args_size were zero.  */
  delta = -save_args_size;
  cur_trace->end_true_args_size = 0;

  save_cfa = cur_row->cfa;
  if (cur_row->cfa.reg == dw_stack_pointer_regnum)
    {
      /* Convert a change in args_size (always a positive in the
	 direction of stack growth) to a change in stack pointer.  */
#ifndef STACK_GROWS_DOWNWARD
      delta = -delta;
#endif
      cur_row->cfa.offset += delta;
    }

  maybe_record_trace_start (start, origin);

  /* Restore the current trace/row state; the adjustment above applied
     only to the abnormal edge, not to the fallthrough path.  */
  cur_trace->end_true_args_size = save_args_size;
  cur_row->cfa = save_cfa;
}
2252 
/* Propagate CUR_TRACE state to the destinations implied by INSN.  */
/* ??? Sadly, this is in large part a duplicate of make_edges.  */

static void
create_trace_edges (rtx insn)
{
  rtx tmp, lab;
  int i, n;

  if (JUMP_P (insn))
    {
      /* Non-local gotos are handled from the CALL_P path below via
	 nonlocal_goto_handler_labels, not from the jump itself.  */
      if (find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
	return;

      if (tablejump_p (insn, NULL, &tmp))
	{
	  rtvec vec;

	  /* An edge to every entry of the dispatch table.  */
	  tmp = PATTERN (tmp);
	  vec = XVEC (tmp, GET_CODE (tmp) == ADDR_DIFF_VEC);

	  n = GET_NUM_ELEM (vec);
	  for (i = 0; i < n; ++i)
	    {
	      lab = XEXP (RTVEC_ELT (vec, i), 0);
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else if (computed_jump_p (insn))
	{
	  /* A computed jump may reach any label whose address was
	     taken.  */
	  for (lab = forced_labels; lab; lab = XEXP (lab, 1))
	    maybe_record_trace_start (XEXP (lab, 0), insn);
	}
      else if (returnjump_p (insn))
	;
      else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
	{
	  /* asm goto: edges to each label operand.  */
	  n = ASM_OPERANDS_LABEL_LENGTH (tmp);
	  for (i = 0; i < n; ++i)
	    {
	      lab = XEXP (ASM_OPERANDS_LABEL (tmp, i), 0);
	      maybe_record_trace_start (lab, insn);
	    }
	}
      else
	{
	  /* Ordinary (conditional) jump with a single target.  */
	  lab = JUMP_LABEL (insn);
	  gcc_assert (lab != NULL);
	  maybe_record_trace_start (lab, insn);
	}
    }
  else if (CALL_P (insn))
    {
      /* Sibling calls don't have edges inside this function.  */
      if (SIBLING_CALL_P (insn))
	return;

      /* Process non-local goto edges.  */
      if (can_nonlocal_goto (insn))
	for (lab = nonlocal_goto_handler_labels; lab; lab = XEXP (lab, 1))
	  maybe_record_trace_start_abnormal (XEXP (lab, 0), insn);
    }
  else if (GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      /* Delay-slot sequence: recurse on each element; EH edges are
	 created from the elements, so skip the common code below.  */
      rtx seq = PATTERN (insn);
      int i, n = XVECLEN (seq, 0);
      for (i = 0; i < n; ++i)
	create_trace_edges (XVECEXP (seq, 0, i));
      return;
    }

  /* Process EH edges.  */
  if (CALL_P (insn) || cfun->can_throw_non_call_exceptions)
    {
      eh_landing_pad lp = get_eh_landing_pad_from_rtx (insn);
      if (lp)
	maybe_record_trace_start_abnormal (lp->landing_pad, insn);
    }
}
2332 
2333 /* A subroutine of scan_trace.  Do what needs to be done "after" INSN.  */
2334 
2335 static void
2336 scan_insn_after (rtx insn)
2337 {
2338   if (RTX_FRAME_RELATED_P (insn))
2339     dwarf2out_frame_debug (insn);
2340   notice_args_size (insn);
2341 }
2342 
/* Scan the trace beginning at INSN and create the CFI notes for the
   instructions therein.  */

static void
scan_trace (dw_trace_info *trace)
{
  rtx prev, insn = trace->head;
  dw_cfa_location this_cfa;

  if (dump_file)
    fprintf (dump_file, "Processing trace %u : start at %s %d\n",
	     trace->id, rtx_name[(int) GET_CODE (insn)],
	     INSN_UID (insn));

  /* The trace's running end state starts as a copy of its begin state.  */
  trace->end_row = copy_cfi_row (trace->beg_row);
  trace->end_true_args_size = trace->beg_true_args_size;

  cur_trace = trace;
  cur_row = trace->end_row;

  /* Work on a local copy of the CFA so that annulled-branch handling
     below can temporarily redirect cur_cfa without losing it.  */
  this_cfa = cur_row->cfa;
  cur_cfa = &this_cfa;

  for (prev = insn, insn = NEXT_INSN (insn);
       insn;
       prev = insn, insn = NEXT_INSN (insn))
    {
      rtx control;

      /* Do everything that happens "before" the insn.  */
      add_cfi_insn = prev;

      /* Notice the end of a trace.  */
      if (BARRIER_P (insn))
	{
	  /* Don't bother saving the unneeded queued registers at all.  */
	  queued_reg_saves.truncate (0);
	  break;
	}
      if (save_point_p (insn))
	{
	  /* Propagate across fallthru edges.  */
	  dwarf2out_flush_queued_reg_saves ();
	  maybe_record_trace_start (insn, NULL);
	  break;
	}

      if (DEBUG_INSN_P (insn) || !inside_basic_block_p (insn))
	continue;

      /* Handle all changes to the row state.  Sequences require special
	 handling for the positioning of the notes.  */
      if (GET_CODE (PATTERN (insn)) == SEQUENCE)
	{
	  rtx elt, pat = PATTERN (insn);
	  int i, n = XVECLEN (pat, 0);

	  /* Element 0 is the branch or call; the rest fill its delay
	     slots.  */
	  control = XVECEXP (pat, 0, 0);
	  if (can_throw_internal (control))
	    notice_eh_throw (control);
	  dwarf2out_flush_queued_reg_saves ();

	  if (JUMP_P (control) && INSN_ANNULLED_BRANCH_P (control))
	    {
	      /* ??? Hopefully multiple delay slots are not annulled.  */
	      gcc_assert (n == 2);
	      gcc_assert (!RTX_FRAME_RELATED_P (control));
	      gcc_assert (!find_reg_note (control, REG_ARGS_SIZE, NULL));

	      elt = XVECEXP (pat, 0, 1);

	      if (INSN_FROM_TARGET_P (elt))
		{
		  HOST_WIDE_INT restore_args_size;
		  cfi_vec save_row_reg_save;

		  /* If ELT is an instruction from target of an annulled
		     branch, the effects are for the target only and so
		     the args_size and CFA along the current path
		     shouldn't change.  */
		  add_cfi_insn = NULL;
		  restore_args_size = cur_trace->end_true_args_size;
		  cur_cfa = &cur_row->cfa;
		  save_row_reg_save = vec_safe_copy (cur_row->reg_save);

		  scan_insn_after (elt);

		  /* ??? Should we instead save the entire row state?  */
		  gcc_assert (!queued_reg_saves.length ());

		  create_trace_edges (control);

		  /* Undo the effects of ELT on the current path.  */
		  cur_trace->end_true_args_size = restore_args_size;
		  cur_row->cfa = this_cfa;
		  cur_row->reg_save = save_row_reg_save;
		  cur_cfa = &this_cfa;
		}
	      else
		{
		  /* If ELT is a annulled branch-taken instruction (i.e.
		     executed only when branch is not taken), the args_size
		     and CFA should not change through the jump.  */
		  create_trace_edges (control);

		  /* Update and continue with the trace.  */
		  add_cfi_insn = insn;
		  scan_insn_after (elt);
		  def_cfa_1 (&this_cfa);
		}
	      continue;
	    }

	  /* The insns in the delay slot should all be considered to happen
	     "before" a call insn.  Consider a call with a stack pointer
	     adjustment in the delay slot.  The backtrace from the callee
	     should include the sp adjustment.  Unfortunately, that leaves
	     us with an unavoidable unwinding error exactly at the call insn
	     itself.  For jump insns we'd prefer to avoid this error by
	     placing the notes after the sequence.  */
	  if (JUMP_P (control))
	    add_cfi_insn = insn;

	  for (i = 1; i < n; ++i)
	    {
	      elt = XVECEXP (pat, 0, i);
	      scan_insn_after (elt);
	    }

	  /* Make sure any register saves are visible at the jump target.  */
	  dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

          /* However, if there is some adjustment on the call itself, e.g.
	     a call_pop, that action should be considered to happen after
	     the call returns.  */
	  add_cfi_insn = insn;
	  scan_insn_after (control);
	}
      else
	{
	  /* Flush data before calls and jumps, and of course if necessary.  */
	  if (can_throw_internal (insn))
	    {
	      notice_eh_throw (insn);
	      dwarf2out_flush_queued_reg_saves ();
	    }
	  else if (!NONJUMP_INSN_P (insn)
		   || clobbers_queued_reg_save (insn)
		   || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	    dwarf2out_flush_queued_reg_saves ();
	  any_cfis_emitted = false;

	  add_cfi_insn = insn;
	  scan_insn_after (insn);
	  control = insn;
	}

      /* Between frame-related-p and args_size we might have otherwise
	 emitted two cfa adjustments.  Do it now.  */
      def_cfa_1 (&this_cfa);

      /* Minimize the number of advances by emitting the entire queue
	 once anything is emitted.  */
      if (any_cfis_emitted
	  || find_reg_note (insn, REG_CFA_FLUSH_QUEUE, NULL))
	dwarf2out_flush_queued_reg_saves ();

      /* Note that a test for control_flow_insn_p does exactly the
	 same tests as are done to actually create the edges.  So
	 always call the routine and let it not create edges for
	 non-control-flow insns.  */
      create_trace_edges (control);
    }

  /* Clear the pass-global cursors; they are only valid mid-scan.  */
  add_cfi_insn = NULL;
  cur_row = NULL;
  cur_trace = NULL;
  cur_cfa = NULL;
}
2522 
2523 /* Scan the function and create the initial set of CFI notes.  */
2524 
2525 static void
2526 create_cfi_notes (void)
2527 {
2528   dw_trace_info *ti;
2529 
2530   gcc_checking_assert (!queued_reg_saves.exists ());
2531   gcc_checking_assert (!trace_work_list.exists ());
2532 
2533   /* Always begin at the entry trace.  */
2534   ti = &trace_info[0];
2535   scan_trace (ti);
2536 
2537   while (!trace_work_list.is_empty ())
2538     {
2539       ti = trace_work_list.pop ();
2540       scan_trace (ti);
2541     }
2542 
2543   queued_reg_saves.release ();
2544   trace_work_list.release ();
2545 }
2546 
2547 /* Return the insn before the first NOTE_INSN_CFI after START.  */
2548 
2549 static rtx
2550 before_next_cfi_note (rtx start)
2551 {
2552   rtx prev = start;
2553   while (start)
2554     {
2555       if (NOTE_P (start) && NOTE_KIND (start) == NOTE_INSN_CFI)
2556 	return prev;
2557       prev = start;
2558       start = NEXT_INSN (start);
2559     }
2560   gcc_unreachable ();
2561 }
2562 
/* Insert CFI notes between traces to properly change state between them.  */

static void
connect_traces (void)
{
  unsigned i, n = trace_info.length ();
  dw_trace_info *prev_ti, *ti;

  /* ??? Ideally, we should have both queued and processed every trace.
     However the current representation of constant pools on various targets
     is indistinguishable from unreachable code.  Assume for the moment that
     we can simply skip over such traces.  */
  /* ??? Consider creating a DATA_INSN rtx code to indicate that
     these are not "real" instructions, and should not be considered.
     This could be generically useful for tablejump data as well.  */
  /* Remove all unprocessed traces from the list.  */
  for (i = n - 1; i > 0; --i)
    {
      ti = &trace_info[i];
      if (ti->beg_row == NULL)
	{
	  trace_info.ordered_remove (i);
	  n -= 1;
	}
      else
	gcc_assert (ti->end_row != NULL);
    }

  /* Work from the end back to the beginning.  This lets us easily insert
     remember/restore_state notes in the correct order wrt other notes.  */
  prev_ti = &trace_info[n - 1];
  for (i = n - 1; i > 0; --i)
    {
      dw_cfi_row *old_row;

      ti = prev_ti;
      prev_ti = &trace_info[i - 1];

      /* New notes are inserted at the head of trace TI.  */
      add_cfi_insn = ti->head;

      /* In dwarf2out_switch_text_section, we'll begin a new FDE
	 for the portion of the function in the alternate text
	 section.  The row state at the very beginning of that
	 new FDE will be exactly the row state from the CIE.  */
      if (ti->switch_sections)
	old_row = cie_cfi_row;
      else
	{
	  old_row = prev_ti->end_row;
	  /* If there's no change from the previous end state, fine.  */
	  if (cfi_row_equal_p (old_row, ti->beg_row))
	    ;
	  /* Otherwise check for the common case of sharing state with
	     the beginning of an epilogue, but not the end.  Insert
	     remember/restore opcodes in that case.  */
	  else if (cfi_row_equal_p (prev_ti->beg_row, ti->beg_row))
	    {
	      dw_cfi_ref cfi;

	      /* Note that if we blindly insert the remember at the
		 start of the trace, we can wind up increasing the
		 size of the unwind info due to extra advance opcodes.
		 Instead, put the remember immediately before the next
		 state change.  We know there must be one, because the
		 state at the beginning and head of the trace differ.  */
	      add_cfi_insn = before_next_cfi_note (prev_ti->head);
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_remember_state;
	      add_cfi (cfi);

	      add_cfi_insn = ti->head;
	      cfi = new_cfi ();
	      cfi->dw_cfi_opc = DW_CFA_restore_state;
	      add_cfi (cfi);

	      /* After restore_state, the effective incoming row is the
		 remembered one.  */
	      old_row = prev_ti->beg_row;
	    }
	  /* Otherwise, we'll simply change state from the previous end.  */
	}

      change_cfi_row (old_row, ti->beg_row);

      if (dump_file && add_cfi_insn != ti->head)
	{
	  rtx note;

	  fprintf (dump_file, "Fixup between trace %u and %u:\n",
		   prev_ti->id, ti->id);

	  note = ti->head;
	  do
	    {
	      note = NEXT_INSN (note);
	      gcc_assert (NOTE_P (note) && NOTE_KIND (note) == NOTE_INSN_CFI);
	      output_cfi_directive (dump_file, NOTE_CFI (note));
	    }
	  while (note != add_cfi_insn);
	}
    }

  /* Connect args_size between traces that have can_throw_internal insns.  */
  if (cfun->eh->lp_array)
    {
      HOST_WIDE_INT prev_args_size = 0;

      for (i = 0; i < n; ++i)
	{
	  ti = &trace_info[i];

	  /* A new FDE (section switch) restarts args_size at zero.  */
	  if (ti->switch_sections)
	    prev_args_size = 0;
	  if (ti->eh_head == NULL)
	    continue;
	  gcc_assert (!ti->args_size_undefined);

	  if (ti->beg_delay_args_size != prev_args_size)
	    {
	      /* ??? Search back to previous CFI note.  */
	      add_cfi_insn = PREV_INSN (ti->eh_head);
	      add_cfi_args_size (ti->beg_delay_args_size);
	    }

	  prev_args_size = ti->end_delay_args_size;
	}
    }
}
2689 
/* Set up the pseudo-cfg of instruction traces, as described at the
   block comment at the top of the file.  */

static void
create_pseudo_cfg (void)
{
  bool saw_barrier, switch_sections;
  dw_trace_info ti;
  rtx insn;
  unsigned i;

  /* The first trace begins at the start of the function,
     and begins with the CIE row state.  */
  trace_info.create (16);
  memset (&ti, 0, sizeof (ti));
  ti.head = get_insns ();
  ti.beg_row = cie_cfi_row;
  ti.cfa_store = cie_cfi_row->cfa;
  ti.cfa_temp.reg = INVALID_REGNUM;
  trace_info.quick_push (ti);

  if (cie_return_save)
    ti.regs_saved_in_regs.safe_push (*cie_return_save);

  /* Walk all the insns, collecting start of trace locations.  */
  saw_barrier = false;
  switch_sections = false;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (BARRIER_P (insn))
	saw_barrier = true;
      else if (NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	{
	  /* We should have just seen a barrier.  */
	  gcc_assert (saw_barrier);
	  switch_sections = true;
	}
      /* Watch out for save_point notes between basic blocks.
	 In particular, a note after a barrier.  Do not record these,
	 delaying trace creation until the label.  */
      else if (save_point_p (insn)
	       && (LABEL_P (insn) || !saw_barrier))
	{
	  /* Begin a new trace at this insn.  Its row state is filled in
	     later, when an edge to it is first seen (see
	     maybe_record_trace_start).  */
	  memset (&ti, 0, sizeof (ti));
	  ti.head = insn;
	  ti.switch_sections = switch_sections;
	  ti.id = trace_info.length () - 1;
	  trace_info.safe_push (ti);

	  saw_barrier = false;
	  switch_sections = false;
	}
    }

  /* Create the trace index after we've finished building trace_info,
     avoiding stale pointer problems due to reallocation.  */
  trace_index = htab_create (trace_info.length (),
			     dw_trace_info_hash, dw_trace_info_eq, NULL);
  dw_trace_info *tp;
  FOR_EACH_VEC_ELT (trace_info, i, tp)
    {
      void **slot;

      if (dump_file)
	fprintf (dump_file, "Creating trace %u : start at %s %d%s\n", i,
		 rtx_name[(int) GET_CODE (tp->head)], INSN_UID (tp->head),
		 tp->switch_sections ? " (section switch)" : "");

      /* Traces are keyed by the UID of their head insn.  */
      slot = htab_find_slot_with_hash (trace_index, tp,
				       INSN_UID (tp->head), INSERT);
      gcc_assert (*slot == NULL);
      *slot = (void *) tp;
    }
}
2765 
/* Record the initial position of the return address.  RTL is
   INCOMING_RETURN_ADDR_RTX.  */

static void
initial_return_save (rtx rtl)
{
  unsigned int reg = INVALID_REGNUM;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (rtl))
    {
    case REG:
      /* RA is in a register.  */
      reg = dwf_regno (rtl);
      break;

    case MEM:
      /* RA is on the stack.  Decode the address, which must be
	 SP-relative; REG stays INVALID_REGNUM in this case.  */
      rtl = XEXP (rtl, 0);
      switch (GET_CODE (rtl))
	{
	case REG:
	  gcc_assert (REGNO (rtl) == STACK_POINTER_REGNUM);
	  offset = 0;
	  break;

	case PLUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = INTVAL (XEXP (rtl, 1));
	  break;

	case MINUS:
	  gcc_assert (REGNO (XEXP (rtl, 0)) == STACK_POINTER_REGNUM);
	  offset = -INTVAL (XEXP (rtl, 1));
	  break;

	default:
	  gcc_unreachable ();
	}

      break;

    case PLUS:
      /* The return address is at some offset from any value we can
	 actually load.  For instance, on the SPARC it is in %i7+8. Just
	 ignore the offset for now; it doesn't matter for unwinding frames.  */
      gcc_assert (CONST_INT_P (XEXP (rtl, 1)));
      initial_return_save (XEXP (rtl, 0));
      return;

    default:
      gcc_unreachable ();
    }

  if (reg != DWARF_FRAME_RETURN_COLUMN)
    {
      /* The RA lives somewhere other than its own DWARF column: either
	 in another register (record the alias) or on the stack.  Emit
	 the save relative to the current CFA.  */
      if (reg != INVALID_REGNUM)
        record_reg_saved_in_reg (rtl, pc_rtx);
      reg_save (DWARF_FRAME_RETURN_COLUMN, reg, offset - cur_row->cfa.offset);
    }
}
2827 
/* Compute the CIE row state — the frame state on entry to every
   function — and cache it in cie_cfi_vec / cie_cfi_row.  Also records
   cie_return_save when the return column aliases a register.  Runs
   once per compilation (see execute_dwarf2_frame).  */
static void
create_cie_data (void)
{
  dw_cfa_location loc;
  dw_trace_info cie_trace;

  dw_stack_pointer_regnum = DWARF_FRAME_REGNUM (STACK_POINTER_REGNUM);
  dw_frame_pointer_regnum = DWARF_FRAME_REGNUM (HARD_FRAME_POINTER_REGNUM);

  /* Use a scratch trace as cur_trace so the shared emission helpers
     (def_cfa_1, reg_save, ...) have somewhere to record state.  */
  memset (&cie_trace, 0, sizeof(cie_trace));
  cur_trace = &cie_trace;

  add_cfi_vec = &cie_cfi_vec;
  cie_cfi_row = cur_row = new_cfi_row ();

  /* On entry, the Canonical Frame Address is at SP.  */
  memset(&loc, 0, sizeof (loc));
  loc.reg = dw_stack_pointer_regnum;
  loc.offset = INCOMING_FRAME_SP_OFFSET;
  def_cfa_1 (&loc);

  if (targetm.debug_unwind_info () == UI_DWARF2
      || targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
    {
      initial_return_save (INCOMING_RETURN_ADDR_RTX);

      /* For a few targets, we have the return address incoming into a
	 register, but choose a different return column.  This will result
	 in a DW_CFA_register for the return, and an entry in
	 regs_saved_in_regs to match.  If the target later stores that
	 return address register to the stack, we want to be able to emit
	 the DW_CFA_offset against the return column, not the intermediate
	 save register.  Save the contents of regs_saved_in_regs so that
	 we can re-initialize it at the start of each function.  */
      switch (cie_trace.regs_saved_in_regs.length ())
	{
	case 0:
	  break;
	case 1:
	  cie_return_save = ggc_alloc_reg_saved_in_data ();
	  *cie_return_save = cie_trace.regs_saved_in_regs[0];
	  cie_trace.regs_saved_in_regs.release ();
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* Detach the emission globals; they are re-pointed per function.  */
  add_cfi_vec = NULL;
  cur_row = NULL;
  cur_trace = NULL;
}
2880 
2881 /* Annotate the function with NOTE_INSN_CFI notes to record the CFI
2882    state at each location within the function.  These notes will be
2883    emitted during pass_final.  */
2884 
2885 static unsigned int
2886 execute_dwarf2_frame (void)
2887 {
2888   /* The first time we're called, compute the incoming frame state.  */
2889   if (cie_cfi_vec == NULL)
2890     create_cie_data ();
2891 
2892   dwarf2out_alloc_current_fde ();
2893 
2894   create_pseudo_cfg ();
2895 
2896   /* Do the work.  */
2897   create_cfi_notes ();
2898   connect_traces ();
2899   add_cfis_to_fde ();
2900 
2901   /* Free all the data we allocated.  */
2902   {
2903     size_t i;
2904     dw_trace_info *ti;
2905 
2906     FOR_EACH_VEC_ELT (trace_info, i, ti)
2907       ti->regs_saved_in_regs.release ();
2908   }
2909   trace_info.release ();
2910 
2911   htab_delete (trace_index);
2912   trace_index = NULL;
2913 
2914   return 0;
2915 }
2916 
2917 /* Convert a DWARF call frame info. operation to its string name */
2918 
2919 static const char *
2920 dwarf_cfi_name (unsigned int cfi_opc)
2921 {
2922   const char *name = get_DW_CFA_name (cfi_opc);
2923 
2924   if (name != NULL)
2925     return name;
2926 
2927   return "DW_CFA_<unknown>";
2928 }
2929 
2930 /* This routine will generate the correct assembly data for a location
2931    description based on a cfi entry with a complex address.  */
2932 
2933 static void
2934 output_cfa_loc (dw_cfi_ref cfi, int for_eh)
2935 {
2936   dw_loc_descr_ref loc;
2937   unsigned long size;
2938 
2939   if (cfi->dw_cfi_opc == DW_CFA_expression)
2940     {
2941       unsigned r =
2942 	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
2943       dw2_asm_output_data (1, r, NULL);
2944       loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2945     }
2946   else
2947     loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2948 
2949   /* Output the size of the block.  */
2950   size = size_of_locs (loc);
2951   dw2_asm_output_data_uleb128 (size, NULL);
2952 
2953   /* Now output the operations themselves.  */
2954   output_loc_sequence (loc, for_eh);
2955 }
2956 
2957 /* Similar, but used for .cfi_escape.  */
2958 
2959 static void
2960 output_cfa_loc_raw (dw_cfi_ref cfi)
2961 {
2962   dw_loc_descr_ref loc;
2963   unsigned long size;
2964 
2965   if (cfi->dw_cfi_opc == DW_CFA_expression)
2966     {
2967       unsigned r =
2968 	DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
2969       fprintf (asm_out_file, "%#x,", r);
2970       loc = cfi->dw_cfi_oprnd2.dw_cfi_loc;
2971     }
2972   else
2973     loc = cfi->dw_cfi_oprnd1.dw_cfi_loc;
2974 
2975   /* Output the size of the block.  */
2976   size = size_of_locs (loc);
2977   dw2_asm_output_data_uleb128_raw (size);
2978   fputc (',', asm_out_file);
2979 
2980   /* Now output the operations themselves.  */
2981   output_loc_sequence_raw (loc);
2982 }
2983 
/* Output a Call Frame Information opcode and its operand(s) in the
   binary .eh_frame/.debug_frame encoding.  FDE supplies (and receives)
   the current label used as the base for advance_loc deltas; FOR_EH
   selects EH-style register number mapping.  */

void
output_cfi (dw_cfi_ref cfi, dw_fde_ref fde, int for_eh)
{
  unsigned long r;
  HOST_WIDE_INT off;

  /* DW_CFA_advance_loc, DW_CFA_offset and DW_CFA_restore pack their
     first operand into the low 6 bits of the opcode byte itself.  */
  if (cfi->dw_cfi_opc == DW_CFA_advance_loc)
    dw2_asm_output_data (1, (cfi->dw_cfi_opc
			     | (cfi->dw_cfi_oprnd1.dw_cfi_offset & 0x3f)),
			 "DW_CFA_advance_loc " HOST_WIDE_INT_PRINT_HEX,
			 ((unsigned HOST_WIDE_INT)
			  cfi->dw_cfi_oprnd1.dw_cfi_offset));
  else if (cfi->dw_cfi_opc == DW_CFA_offset)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_offset, column %#lx", r);
      /* The offset operand is emitted factored by the data alignment.  */
      off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
      dw2_asm_output_data_uleb128 (off, NULL);
    }
  else if (cfi->dw_cfi_opc == DW_CFA_restore)
    {
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
      dw2_asm_output_data (1, (cfi->dw_cfi_opc | (r & 0x3f)),
			   "DW_CFA_restore, column %#lx", r);
    }
  else
    {
      /* All other opcodes occupy a full byte; their operands follow.  */
      dw2_asm_output_data (1, cfi->dw_cfi_opc,
			   "%s", dwarf_cfi_name (cfi->dw_cfi_opc));

      switch (cfi->dw_cfi_opc)
	{
	case DW_CFA_set_loc:
	  if (for_eh)
	    dw2_asm_output_encoded_addr_rtx (
		ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/1, /*global=*/0),
		gen_rtx_SYMBOL_REF (Pmode, cfi->dw_cfi_oprnd1.dw_cfi_addr),
		false, NULL);
	  else
	    dw2_asm_output_addr (DWARF2_ADDR_SIZE,
				 cfi->dw_cfi_oprnd1.dw_cfi_addr, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	/* Each advance_loc variant emits a delta from the FDE's current
	   label, then makes the new label current.  */
	case DW_CFA_advance_loc1:
	  dw2_asm_output_delta (1, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc2:
	  dw2_asm_output_delta (2, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_advance_loc4:
	  dw2_asm_output_delta (4, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_MIPS_advance_loc8:
	  dw2_asm_output_delta (8, cfi->dw_cfi_oprnd1.dw_cfi_addr,
				fde->dw_fde_current_label, NULL);
	  fde->dw_fde_current_label = cfi->dw_cfi_oprnd1.dw_cfi_addr;
	  break;

	case DW_CFA_offset_extended:
	  /* uleb128 register, then uleb128 factored offset.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_uleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa:
	  /* uleb128 register, then uleb128 (non-factored) offset.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd2.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_offset_extended_sf:
	  /* uleb128 register, then sleb128 factored offset.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_def_cfa_sf:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  off = div_data_align (cfi->dw_cfi_oprnd2.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	/* These take a single uleb128 register operand.  */
	case DW_CFA_restore_extended:
	case DW_CFA_undefined:
	case DW_CFA_same_value:
	case DW_CFA_def_cfa_register:
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_register:
	  /* Two uleb128 register operands: target, then source.  */
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, for_eh);
	  dw2_asm_output_data_uleb128 (r, NULL);
	  break;

	case DW_CFA_def_cfa_offset:
	case DW_CFA_GNU_args_size:
	  dw2_asm_output_data_uleb128 (cfi->dw_cfi_oprnd1.dw_cfi_offset, NULL);
	  break;

	case DW_CFA_def_cfa_offset_sf:
	  off = div_data_align (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  dw2_asm_output_data_sleb128 (off, NULL);
	  break;

	case DW_CFA_GNU_window_save:
	  /* No operands.  */
	  break;

	case DW_CFA_def_cfa_expression:
	case DW_CFA_expression:
	  /* Operands are a DWARF location expression block.  */
	  output_cfa_loc (cfi, for_eh);
	  break;

	case DW_CFA_GNU_negative_offset_extended:
	  /* Obsoleted by DW_CFA_offset_extended_sf.  */
	  gcc_unreachable ();

	default:
	  /* Remaining opcodes (e.g. DW_CFA_nop) have no operands.  */
	  break;
	}
    }
}
3124 
/* Similar, but emit CFI via assembler directives instead of raw bytes.
   F is normally asm_out_file; when it is some other stream this routine
   is being used for debugging dumps and emits placeholder text for the
   entries the assembler would otherwise synthesize.  */

void
output_cfi_directive (FILE *f, dw_cfi_ref cfi)
{
  unsigned long r, r2;

  switch (cfi->dw_cfi_opc)
    {
    case DW_CFA_advance_loc:
    case DW_CFA_advance_loc1:
    case DW_CFA_advance_loc2:
    case DW_CFA_advance_loc4:
    case DW_CFA_MIPS_advance_loc8:
    case DW_CFA_set_loc:
      /* Should only be created in a code path not followed when emitting
	 via directives.  The assembler is going to take care of this for
	 us.  But this routines is also used for debugging dumps, so
	 print something.  */
      gcc_assert (f != asm_out_file);
      fprintf (f, "\t.cfi_advance_loc\n");
      break;

    case DW_CFA_offset:
    case DW_CFA_offset_extended:
    case DW_CFA_offset_extended_sf:
      /* Register numbers are always remapped for EH (second arg 1).  */
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_offset %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_restore:
    case DW_CFA_restore_extended:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_restore %lu\n", r);
      break;

    case DW_CFA_undefined:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_undefined %lu\n", r);
      break;

    case DW_CFA_same_value:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_same_value %lu\n", r);
      break;

    case DW_CFA_def_cfa:
    case DW_CFA_def_cfa_sf:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa %lu, "HOST_WIDE_INT_PRINT_DEC"\n",
	       r, cfi->dw_cfi_oprnd2.dw_cfi_offset);
      break;

    case DW_CFA_def_cfa_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_def_cfa_register %lu\n", r);
      break;

    case DW_CFA_register:
      r = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd1.dw_cfi_reg_num, 1);
      r2 = DWARF2_FRAME_REG_OUT (cfi->dw_cfi_oprnd2.dw_cfi_reg_num, 1);
      fprintf (f, "\t.cfi_register %lu, %lu\n", r, r2);
      break;

    case DW_CFA_def_cfa_offset:
    case DW_CFA_def_cfa_offset_sf:
      fprintf (f, "\t.cfi_def_cfa_offset "
	       HOST_WIDE_INT_PRINT_DEC"\n",
	       cfi->dw_cfi_oprnd1.dw_cfi_offset);
      break;

    case DW_CFA_remember_state:
      fprintf (f, "\t.cfi_remember_state\n");
      break;
    case DW_CFA_restore_state:
      fprintf (f, "\t.cfi_restore_state\n");
      break;

    case DW_CFA_GNU_args_size:
      /* There is no assembler directive for this opcode, so when writing
	 real assembly encode it via .cfi_escape as raw CFI bytes.  */
      if (f == asm_out_file)
	{
	  fprintf (f, "\t.cfi_escape %#x,", DW_CFA_GNU_args_size);
	  dw2_asm_output_data_uleb128_raw (cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  if (flag_debug_asm)
	    fprintf (f, "\t%s args_size "HOST_WIDE_INT_PRINT_DEC,
		     ASM_COMMENT_START, cfi->dw_cfi_oprnd1.dw_cfi_offset);
	  fputc ('\n', f);
	}
      else
	{
	  fprintf (f, "\t.cfi_GNU_args_size "HOST_WIDE_INT_PRINT_DEC "\n",
		   cfi->dw_cfi_oprnd1.dw_cfi_offset);
	}
      break;

    case DW_CFA_GNU_window_save:
      fprintf (f, "\t.cfi_window_save\n");
      break;

    case DW_CFA_def_cfa_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_def_cfa_expression ...\n");
	  break;
	}
      /* FALLTHRU */
    case DW_CFA_expression:
      if (f != asm_out_file)
	{
	  fprintf (f, "\t.cfi_cfa_expression ...\n");
	  break;
	}
      /* Expressions have no dedicated directive either; escape the
	 opcode byte and the raw location expression.  */
      fprintf (f, "\t.cfi_escape %#x,", cfi->dw_cfi_opc);
      output_cfa_loc_raw (cfi);
      fputc ('\n', f);
      break;

    default:
      gcc_unreachable ();
    }
}
3247 
3248 void
3249 dwarf2out_emit_cfi (dw_cfi_ref cfi)
3250 {
3251   if (dwarf2out_do_cfi_asm ())
3252     output_cfi_directive (asm_out_file, cfi);
3253 }
3254 
3255 static void
3256 dump_cfi_row (FILE *f, dw_cfi_row *row)
3257 {
3258   dw_cfi_ref cfi;
3259   unsigned i;
3260 
3261   cfi = row->cfa_cfi;
3262   if (!cfi)
3263     {
3264       dw_cfa_location dummy;
3265       memset(&dummy, 0, sizeof(dummy));
3266       dummy.reg = INVALID_REGNUM;
3267       cfi = def_cfa_0 (&dummy, &row->cfa);
3268     }
3269   output_cfi_directive (f, cfi);
3270 
3271   FOR_EACH_VEC_SAFE_ELT (row->reg_save, i, cfi)
3272     if (cfi)
3273       output_cfi_directive (f, cfi);
3274 }
3275 
3276 void debug_cfi_row (dw_cfi_row *row);
3277 
3278 void
3279 debug_cfi_row (dw_cfi_row *row)
3280 {
3281   dump_cfi_row (stderr, row);
3282 }
3283 
3284 
3285 /* Save the result of dwarf2out_do_frame across PCH.
3286    This variable is tri-state, with 0 unset, >0 true, <0 false.  */
3287 static GTY(()) signed char saved_do_cfi_asm = 0;
3288 
3289 /* Decide whether we want to emit frame unwind information for the current
3290    translation unit.  */
3291 
3292 bool
3293 dwarf2out_do_frame (void)
3294 {
3295   /* We want to emit correct CFA location expressions or lists, so we
3296      have to return true if we're going to output debug info, even if
3297      we're not going to output frame or unwind info.  */
3298   if (write_symbols == DWARF2_DEBUG || write_symbols == VMS_AND_DWARF2_DEBUG)
3299     return true;
3300 
3301   if (saved_do_cfi_asm > 0)
3302     return true;
3303 
3304   if (targetm.debug_unwind_info () == UI_DWARF2)
3305     return true;
3306 
3307   if ((flag_unwind_tables || flag_exceptions)
3308       && targetm_common.except_unwind_info (&global_options) == UI_DWARF2)
3309     return true;
3310 
3311   return false;
3312 }
3313 
/* Decide whether to emit frame unwind via assembler directives.
   The result is cached in saved_do_cfi_asm (tri-state, preserved
   across PCH): 0 undecided, >0 yes, <0 no.  */

bool
dwarf2out_do_cfi_asm (void)
{
  int enc;

  /* Return the cached decision if one has been made.  */
  if (saved_do_cfi_asm != 0)
    return saved_do_cfi_asm > 0;

  /* Assume failure for a moment.  */
  saved_do_cfi_asm = -1;

  if (!flag_dwarf2_cfi_asm || !dwarf2out_do_frame ())
    return false;
  if (!HAVE_GAS_CFI_PERSONALITY_DIRECTIVE)
    return false;

  /* Make sure the personality encoding is one the assembler can support.
     In particular, aligned addresses can't be handled.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/2,/*global=*/1);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;
  /* Likewise for the code==0 (data reference) encoding.  */
  enc = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0,/*global=*/0);
  if ((enc & 0x70) != 0 && (enc & 0x70) != DW_EH_PE_pcrel)
    return false;

  /* If we can't get the assembler to emit only .debug_frame, and we don't need
     dwarf2 unwind info for exceptions, then emit .debug_frame by hand.  */
  if (!HAVE_GAS_CFI_SECTIONS_DIRECTIVE
      && !flag_unwind_tables && !flag_exceptions
      && targetm_common.except_unwind_info (&global_options) != UI_DWARF2)
    return false;

  /* Success!  */
  saved_do_cfi_asm = 1;
  return true;
}
3352 
/* Gate for pass_dwarf2_frame: run the CFI annotation pass only when
   dwarf2 frame information will actually be produced.  */

static bool
gate_dwarf2_frame (void)
{
#ifndef HAVE_prologue
  /* Targets which still implement the prologue in assembler text
     cannot use the generic dwarf2 unwinding.  */
  return false;
#endif

  /* ??? What to do for UI_TARGET unwinding?  They might be able to benefit
     from the optimized shrink-wrapping annotations that we will compute.
     For now, only produce the CFI notes for dwarf2.  */
  return dwarf2out_do_frame ();
}
3367 
/* The RTL pass that computes and attaches CFI notes to insns; the
   actual work is done by execute_dwarf2_frame, gated by
   gate_dwarf2_frame above.  */
struct rtl_opt_pass pass_dwarf2_frame =
{
 {
  RTL_PASS,
  "dwarf2",			/* name */
  OPTGROUP_NONE,                /* optinfo_flags */
  gate_dwarf2_frame,		/* gate */
  execute_dwarf2_frame,		/* execute */
  NULL,				/* sub */
  NULL,				/* next */
  0,				/* static_pass_number */
  TV_FINAL,			/* tv_id */
  0,				/* properties_required */
  0,				/* properties_provided */
  0,				/* properties_destroyed */
  0,				/* todo_flags_start */
  0				/* todo_flags_finish */
 }
};
3387 
3388 #include "gt-dwarf2cfi.h"
3389