xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/config/xtensa/xtensa.c (revision 8ecbf5f02b752fcb7debe1a8fab1dc82602bc760)
1 /* Subroutines for insn-output.c for Tensilica's Xtensa architecture.
2    Copyright (C) 2001-2018 Free Software Foundation, Inc.
3    Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #define IN_TARGET_CODE 1
22 
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "target.h"
28 #include "rtl.h"
29 #include "tree.h"
30 #include "gimple.h"
31 #include "cfghooks.h"
32 #include "df.h"
33 #include "memmodel.h"
34 #include "tm_p.h"
35 #include "stringpool.h"
36 #include "attribs.h"
37 #include "optabs.h"
38 #include "regs.h"
39 #include "emit-rtl.h"
40 #include "recog.h"
41 #include "diagnostic-core.h"
42 #include "cfgrtl.h"
43 #include "output.h"
44 #include "fold-const.h"
45 #include "stor-layout.h"
46 #include "calls.h"
47 #include "varasm.h"
48 #include "alias.h"
49 #include "explow.h"
50 #include "expr.h"
51 #include "reload.h"
52 #include "langhooks.h"
53 #include "gimplify.h"
54 #include "builtins.h"
55 #include "dumpfile.h"
56 #include "hw-doloop.h"
57 #include "rtl-iter.h"
58 
59 /* This file should be included last.  */
60 #include "target-def.h"
61 
62 /* Enumeration for all of the relational tests, so that we can build
63    arrays indexed by the test type, and not worry about the order
64    of EQ, NE, etc.  */
65 
66 enum internal_test
67 {
68   ITEST_EQ,
69   ITEST_NE,
70   ITEST_GT,
71   ITEST_GE,
72   ITEST_LT,
73   ITEST_LE,
74   ITEST_GTU,
75   ITEST_GEU,
76   ITEST_LTU,
77   ITEST_LEU,
78   ITEST_MAX
79 };
80 
81 /* Array giving truth value on whether or not a given hard register
82    can support a given mode.  */
83 static char xtensa_hard_regno_mode_ok_p
84   [(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
85 
86 /* Largest block move to handle in-line.  */
87 #define LARGEST_MOVE_RATIO 15
88 
89 /* Define the structure for the machine field in struct function.  */
90 struct GTY(()) machine_function
91 {
92   int accesses_prev_frame;
93   bool need_a7_copy;
94   bool vararg_a7;
95   rtx vararg_a7_copy;
96   rtx_insn *set_frame_ptr_insn;
97   /* Current frame size calculated by compute_frame_size.  */
98   unsigned current_frame_size;
99   /* Callee-save area size in the current frame calculated by
100      compute_frame_size.  */
101   int callee_save_size;
102   bool frame_laid_out;
103   bool epilogue_done;
104 };
105 
106 /* Vector, indexed by hard register number, which contains 1 for a
107    register that is allowable in a candidate for leaf function
108    treatment.  */
109 
110 const char xtensa_leaf_regs[FIRST_PSEUDO_REGISTER] =
111 {
112   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113   1, 1, 1,
114   1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115   1
116 };
117 
118 static void xtensa_option_override (void);
119 static enum internal_test map_test_to_internal_test (enum rtx_code);
120 static rtx gen_int_relational (enum rtx_code, rtx, rtx, int *);
121 static rtx gen_float_relational (enum rtx_code, rtx, rtx);
122 static rtx gen_conditional_move (enum rtx_code, machine_mode, rtx, rtx);
123 static rtx fixup_subreg_mem (rtx);
124 static struct machine_function * xtensa_init_machine_status (void);
125 static rtx xtensa_legitimize_tls_address (rtx);
126 static rtx xtensa_legitimize_address (rtx, rtx, machine_mode);
127 static bool xtensa_mode_dependent_address_p (const_rtx, addr_space_t);
128 static bool xtensa_return_in_msb (const_tree);
129 static void printx (FILE *, signed int);
130 static rtx xtensa_builtin_saveregs (void);
131 static bool xtensa_legitimate_address_p (machine_mode, rtx, bool);
132 static unsigned int xtensa_multibss_section_type_flags (tree, const char *,
133 							int) ATTRIBUTE_UNUSED;
134 static section *xtensa_select_rtx_section (machine_mode, rtx,
135 					   unsigned HOST_WIDE_INT);
136 static bool xtensa_rtx_costs (rtx, machine_mode, int, int, int *, bool);
137 static int xtensa_register_move_cost (machine_mode, reg_class_t,
138 				      reg_class_t);
139 static int xtensa_memory_move_cost (machine_mode, reg_class_t, bool);
140 static tree xtensa_build_builtin_va_list (void);
141 static bool xtensa_return_in_memory (const_tree, const_tree);
142 static tree xtensa_gimplify_va_arg_expr (tree, tree, gimple_seq *,
143 					 gimple_seq *);
144 static void xtensa_function_arg_advance (cumulative_args_t, machine_mode,
145 					 const_tree, bool);
146 static rtx xtensa_function_arg (cumulative_args_t, machine_mode,
147 				const_tree, bool);
148 static rtx xtensa_function_incoming_arg (cumulative_args_t,
149 					 machine_mode, const_tree, bool);
150 static rtx xtensa_function_value (const_tree, const_tree, bool);
151 static rtx xtensa_libcall_value (machine_mode, const_rtx);
152 static bool xtensa_function_value_regno_p (const unsigned int);
153 static unsigned int xtensa_function_arg_boundary (machine_mode,
154 						  const_tree);
155 static void xtensa_init_builtins (void);
156 static tree xtensa_fold_builtin (tree, int, tree *, bool);
157 static rtx xtensa_expand_builtin (tree, rtx, rtx, machine_mode, int);
158 static void xtensa_va_start (tree, rtx);
159 static bool xtensa_frame_pointer_required (void);
160 static rtx xtensa_static_chain (const_tree, bool);
161 static void xtensa_asm_trampoline_template (FILE *);
162 static void xtensa_trampoline_init (rtx, tree, rtx);
163 static bool xtensa_output_addr_const_extra (FILE *, rtx);
164 static bool xtensa_cannot_force_const_mem (machine_mode, rtx);
165 
166 static reg_class_t xtensa_preferred_reload_class (rtx, reg_class_t);
167 static reg_class_t xtensa_preferred_output_reload_class (rtx, reg_class_t);
168 static reg_class_t xtensa_secondary_reload (bool, rtx, reg_class_t,
169 					    machine_mode,
170 					    struct secondary_reload_info *);
171 
172 static bool constantpool_address_p (const_rtx addr);
173 static bool xtensa_legitimate_constant_p (machine_mode, rtx);
174 static void xtensa_reorg (void);
175 static bool xtensa_can_use_doloop_p (const widest_int &, const widest_int &,
176                                      unsigned int, bool);
177 static const char *xtensa_invalid_within_doloop (const rtx_insn *);
178 
179 static bool xtensa_member_type_forces_blk (const_tree,
180 					   machine_mode mode);
181 
182 static void xtensa_conditional_register_usage (void);
183 static unsigned int xtensa_hard_regno_nregs (unsigned int, machine_mode);
184 static bool xtensa_hard_regno_mode_ok (unsigned int, machine_mode);
185 static bool xtensa_modes_tieable_p (machine_mode, machine_mode);
186 static HOST_WIDE_INT xtensa_constant_alignment (const_tree, HOST_WIDE_INT);
187 static HOST_WIDE_INT xtensa_starting_frame_offset (void);
188 static unsigned HOST_WIDE_INT xtensa_asan_shadow_offset (void);
189 
190 
191 
192 /* These hooks specify assembly directives for creating certain kinds
193    of integer object.  */
194 
195 #undef TARGET_ASM_ALIGNED_SI_OP
196 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
197 
198 #undef TARGET_ASM_SELECT_RTX_SECTION
199 #define TARGET_ASM_SELECT_RTX_SECTION  xtensa_select_rtx_section
200 
201 #undef TARGET_LEGITIMIZE_ADDRESS
202 #define TARGET_LEGITIMIZE_ADDRESS xtensa_legitimize_address
203 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
204 #define TARGET_MODE_DEPENDENT_ADDRESS_P xtensa_mode_dependent_address_p
205 
206 #undef TARGET_REGISTER_MOVE_COST
207 #define TARGET_REGISTER_MOVE_COST xtensa_register_move_cost
208 #undef TARGET_MEMORY_MOVE_COST
209 #define TARGET_MEMORY_MOVE_COST xtensa_memory_move_cost
210 #undef TARGET_RTX_COSTS
211 #define TARGET_RTX_COSTS xtensa_rtx_costs
212 #undef TARGET_ADDRESS_COST
213 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
214 
215 #undef TARGET_MEMBER_TYPE_FORCES_BLK
216 #define TARGET_MEMBER_TYPE_FORCES_BLK xtensa_member_type_forces_blk
217 
218 #undef TARGET_BUILD_BUILTIN_VA_LIST
219 #define TARGET_BUILD_BUILTIN_VA_LIST xtensa_build_builtin_va_list
220 
221 #undef TARGET_EXPAND_BUILTIN_VA_START
222 #define TARGET_EXPAND_BUILTIN_VA_START xtensa_va_start
223 
224 #undef TARGET_PROMOTE_FUNCTION_MODE
225 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
226 #undef TARGET_PROMOTE_PROTOTYPES
227 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
228 
229 #undef TARGET_RETURN_IN_MEMORY
230 #define TARGET_RETURN_IN_MEMORY xtensa_return_in_memory
231 #undef TARGET_FUNCTION_VALUE
232 #define TARGET_FUNCTION_VALUE xtensa_function_value
233 #undef TARGET_LIBCALL_VALUE
234 #define TARGET_LIBCALL_VALUE xtensa_libcall_value
235 #undef TARGET_FUNCTION_VALUE_REGNO_P
236 #define TARGET_FUNCTION_VALUE_REGNO_P xtensa_function_value_regno_p
237 
238 #undef TARGET_SPLIT_COMPLEX_ARG
239 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
240 #undef TARGET_MUST_PASS_IN_STACK
241 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
242 #undef TARGET_FUNCTION_ARG_ADVANCE
243 #define TARGET_FUNCTION_ARG_ADVANCE xtensa_function_arg_advance
244 #undef TARGET_FUNCTION_ARG
245 #define TARGET_FUNCTION_ARG xtensa_function_arg
246 #undef TARGET_FUNCTION_INCOMING_ARG
247 #define TARGET_FUNCTION_INCOMING_ARG xtensa_function_incoming_arg
248 #undef TARGET_FUNCTION_ARG_BOUNDARY
249 #define TARGET_FUNCTION_ARG_BOUNDARY xtensa_function_arg_boundary
250 
251 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
252 #define TARGET_EXPAND_BUILTIN_SAVEREGS xtensa_builtin_saveregs
253 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
254 #define TARGET_GIMPLIFY_VA_ARG_EXPR xtensa_gimplify_va_arg_expr
255 
256 #undef TARGET_RETURN_IN_MSB
257 #define TARGET_RETURN_IN_MSB xtensa_return_in_msb
258 
259 #undef  TARGET_INIT_BUILTINS
260 #define TARGET_INIT_BUILTINS xtensa_init_builtins
261 #undef  TARGET_FOLD_BUILTIN
262 #define TARGET_FOLD_BUILTIN xtensa_fold_builtin
263 #undef  TARGET_EXPAND_BUILTIN
264 #define TARGET_EXPAND_BUILTIN xtensa_expand_builtin
265 
266 #undef  TARGET_PREFERRED_RELOAD_CLASS
267 #define TARGET_PREFERRED_RELOAD_CLASS xtensa_preferred_reload_class
268 #undef  TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
269 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS xtensa_preferred_output_reload_class
270 
271 #undef TARGET_SECONDARY_RELOAD
272 #define TARGET_SECONDARY_RELOAD xtensa_secondary_reload
273 
274 #undef TARGET_HAVE_TLS
275 #define TARGET_HAVE_TLS (TARGET_THREADPTR && HAVE_AS_TLS)
276 
277 #undef TARGET_CANNOT_FORCE_CONST_MEM
278 #define TARGET_CANNOT_FORCE_CONST_MEM xtensa_cannot_force_const_mem
279 
280 #undef TARGET_LRA_P
281 #define TARGET_LRA_P hook_bool_void_false
282 
283 #undef TARGET_LEGITIMATE_ADDRESS_P
284 #define TARGET_LEGITIMATE_ADDRESS_P	xtensa_legitimate_address_p
285 
286 #undef TARGET_FRAME_POINTER_REQUIRED
287 #define TARGET_FRAME_POINTER_REQUIRED xtensa_frame_pointer_required
288 
289 #undef TARGET_STATIC_CHAIN
290 #define TARGET_STATIC_CHAIN xtensa_static_chain
291 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
292 #define TARGET_ASM_TRAMPOLINE_TEMPLATE xtensa_asm_trampoline_template
293 #undef TARGET_TRAMPOLINE_INIT
294 #define TARGET_TRAMPOLINE_INIT xtensa_trampoline_init
295 
296 #undef TARGET_OPTION_OVERRIDE
297 #define TARGET_OPTION_OVERRIDE xtensa_option_override
298 
299 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
300 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA xtensa_output_addr_const_extra
301 
302 #undef TARGET_LEGITIMATE_CONSTANT_P
303 #define TARGET_LEGITIMATE_CONSTANT_P xtensa_legitimate_constant_p
304 
305 #undef TARGET_MACHINE_DEPENDENT_REORG
306 #define TARGET_MACHINE_DEPENDENT_REORG xtensa_reorg
307 
308 #undef TARGET_CAN_USE_DOLOOP_P
309 #define TARGET_CAN_USE_DOLOOP_P xtensa_can_use_doloop_p
310 
311 #undef TARGET_INVALID_WITHIN_DOLOOP
312 #define TARGET_INVALID_WITHIN_DOLOOP xtensa_invalid_within_doloop
313 
314 #undef TARGET_CONDITIONAL_REGISTER_USAGE
315 #define TARGET_CONDITIONAL_REGISTER_USAGE xtensa_conditional_register_usage
316 
317 #undef TARGET_HARD_REGNO_NREGS
318 #define TARGET_HARD_REGNO_NREGS xtensa_hard_regno_nregs
319 #undef TARGET_HARD_REGNO_MODE_OK
320 #define TARGET_HARD_REGNO_MODE_OK xtensa_hard_regno_mode_ok
321 
322 #undef TARGET_MODES_TIEABLE_P
323 #define TARGET_MODES_TIEABLE_P xtensa_modes_tieable_p
324 
325 #undef TARGET_CONSTANT_ALIGNMENT
326 #define TARGET_CONSTANT_ALIGNMENT xtensa_constant_alignment
327 
328 #undef TARGET_STARTING_FRAME_OFFSET
329 #define TARGET_STARTING_FRAME_OFFSET xtensa_starting_frame_offset
330 
331 #undef TARGET_ASAN_SHADOW_OFFSET
332 #define TARGET_ASAN_SHADOW_OFFSET xtensa_asan_shadow_offset
333 
334 struct gcc_target targetm = TARGET_INITIALIZER;
335 
336 
337 /* Functions to test Xtensa immediate operand validity.  */
338 
339 bool
340 xtensa_simm8 (HOST_WIDE_INT v)
341 {
342   return v >= -128 && v <= 127;
343 }
344 
345 
346 bool
347 xtensa_simm8x256 (HOST_WIDE_INT v)
348 {
349   return (v & 255) == 0 && (v >= -32768 && v <= 32512);
350 }
351 
352 
353 bool
354 xtensa_simm12b (HOST_WIDE_INT v)
355 {
356   return v >= -2048 && v <= 2047;
357 }
358 
359 
360 static bool
361 xtensa_uimm8 (HOST_WIDE_INT v)
362 {
363   return v >= 0 && v <= 255;
364 }
365 
366 
367 static bool
368 xtensa_uimm8x2 (HOST_WIDE_INT v)
369 {
370   return (v & 1) == 0 && (v >= 0 && v <= 510);
371 }
372 
373 
374 static bool
375 xtensa_uimm8x4 (HOST_WIDE_INT v)
376 {
377   return (v & 3) == 0 && (v >= 0 && v <= 1020);
378 }
379 
380 
381 static bool
382 xtensa_b4const (HOST_WIDE_INT v)
383 {
384   switch (v)
385     {
386     case -1:
387     case 1:
388     case 2:
389     case 3:
390     case 4:
391     case 5:
392     case 6:
393     case 7:
394     case 8:
395     case 10:
396     case 12:
397     case 16:
398     case 32:
399     case 64:
400     case 128:
401     case 256:
402       return true;
403     }
404   return false;
405 }
406 
407 
408 bool
409 xtensa_b4const_or_zero (HOST_WIDE_INT v)
410 {
411   if (v == 0)
412     return true;
413   return xtensa_b4const (v);
414 }
415 
416 
417 bool
418 xtensa_b4constu (HOST_WIDE_INT v)
419 {
420   switch (v)
421     {
422     case 32768:
423     case 65536:
424     case 2:
425     case 3:
426     case 4:
427     case 5:
428     case 6:
429     case 7:
430     case 8:
431     case 10:
432     case 12:
433     case 16:
434     case 32:
435     case 64:
436     case 128:
437     case 256:
438       return true;
439     }
440   return false;
441 }
442 
443 
444 bool
445 xtensa_mask_immediate (HOST_WIDE_INT v)
446 {
447 #define MAX_MASK_SIZE 16
448   int mask_size;
449 
450   for (mask_size = 1; mask_size <= MAX_MASK_SIZE; mask_size++)
451     {
452       if ((v & 1) == 0)
453 	return false;
454       v = v >> 1;
455       if (v == 0)
456 	return true;
457     }
458 
459   return false;
460 }
461 
462 
463 /* This is just like the standard true_regnum() function except that it
464    works even when reg_renumber is not initialized.  */
465 
466 int
467 xt_true_regnum (rtx x)
468 {
469   if (GET_CODE (x) == REG)
470     {
471       if (reg_renumber
472 	  && REGNO (x) >= FIRST_PSEUDO_REGISTER
473 	  && reg_renumber[REGNO (x)] >= 0)
474 	return reg_renumber[REGNO (x)];
475       return REGNO (x);
476     }
477   if (GET_CODE (x) == SUBREG)
478     {
479       int base = xt_true_regnum (SUBREG_REG (x));
480       if (base >= 0 && base < FIRST_PSEUDO_REGISTER)
481         return base + subreg_regno_offset (REGNO (SUBREG_REG (x)),
482                                            GET_MODE (SUBREG_REG (x)),
483                                            SUBREG_BYTE (x), GET_MODE (x));
484     }
485   return -1;
486 }
487 
488 
489 int
490 xtensa_valid_move (machine_mode mode, rtx *operands)
491 {
492   /* Either the destination or source must be a register, and the
493      MAC16 accumulator doesn't count.  */
494 
495   if (register_operand (operands[0], mode))
496     {
497       int dst_regnum = xt_true_regnum (operands[0]);
498 
499       if (xtensa_tls_referenced_p (operands[1]))
500 	return FALSE;
501 
502       /* The stack pointer can only be assigned with a MOVSP opcode.  */
503       if (dst_regnum == STACK_POINTER_REGNUM)
504 	return !TARGET_WINDOWED_ABI
505 	  || (mode == SImode
506 	      && register_operand (operands[1], mode)
507 	      && !ACC_REG_P (xt_true_regnum (operands[1])));
508 
509       if (!ACC_REG_P (dst_regnum))
510 	return true;
511     }
512   if (register_operand (operands[1], mode))
513     {
514       int src_regnum = xt_true_regnum (operands[1]);
515       if (!ACC_REG_P (src_regnum))
516 	return true;
517     }
518   return FALSE;
519 }
520 
521 
522 int
523 smalloffset_mem_p (rtx op)
524 {
525   if (GET_CODE (op) == MEM)
526     {
527       rtx addr = XEXP (op, 0);
528       if (GET_CODE (addr) == REG)
529 	return BASE_REG_P (addr, 0);
530       if (GET_CODE (addr) == PLUS)
531 	{
532 	  rtx offset = XEXP (addr, 0);
533 	  HOST_WIDE_INT val;
534 	  if (GET_CODE (offset) != CONST_INT)
535 	    offset = XEXP (addr, 1);
536 	  if (GET_CODE (offset) != CONST_INT)
537 	    return FALSE;
538 
539 	  val = INTVAL (offset);
540 	  return (val & 3) == 0 && (val >= 0 && val <= 60);
541 	}
542     }
543   return FALSE;
544 }
545 
546 
547 static bool
548 constantpool_address_p (const_rtx addr)
549 {
550   const_rtx sym = addr;
551 
552   if (GET_CODE (addr) == CONST)
553     {
554       rtx offset;
555 
556       /* Only handle (PLUS (SYM, OFFSET)) form.  */
557       addr = XEXP (addr, 0);
558       if (GET_CODE (addr) != PLUS)
559 	return false;
560 
561       /* Make sure the address is word aligned.  */
562       offset = XEXP (addr, 1);
563       if ((!CONST_INT_P (offset))
564 	  || ((INTVAL (offset) & 3) != 0))
565 	return false;
566 
567       sym = XEXP (addr, 0);
568     }
569 
570   if ((GET_CODE (sym) == SYMBOL_REF)
571       && CONSTANT_POOL_ADDRESS_P (sym))
572     return true;
573   return false;
574 }
575 
576 
577 int
578 constantpool_mem_p (rtx op)
579 {
580   if (GET_CODE (op) == SUBREG)
581     op = SUBREG_REG (op);
582   if (GET_CODE (op) == MEM)
583     return constantpool_address_p (XEXP (op, 0));
584   return FALSE;
585 }
586 
587 
588 /* Return TRUE if X is a thread-local symbol.  */
589 
590 static bool
591 xtensa_tls_symbol_p (rtx x)
592 {
593   if (! TARGET_HAVE_TLS)
594     return false;
595 
596   return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
597 }
598 
599 
600 void
601 xtensa_extend_reg (rtx dst, rtx src)
602 {
603   rtx temp = gen_reg_rtx (SImode);
604   rtx shift = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (GET_MODE (src)));
605 
606   /* Generate paradoxical subregs as needed so that the modes match.  */
607   src = simplify_gen_subreg (SImode, src, GET_MODE (src), 0);
608   dst = simplify_gen_subreg (SImode, dst, GET_MODE (dst), 0);
609 
610   emit_insn (gen_ashlsi3 (temp, src, shift));
611   emit_insn (gen_ashrsi3 (dst, temp, shift));
612 }
613 
614 
615 bool
616 xtensa_mem_offset (unsigned v, machine_mode mode)
617 {
618   switch (mode)
619     {
620     case E_BLKmode:
621       /* Handle the worst case for block moves.  See xtensa_expand_block_move
622 	 where we emit an optimized block move operation if the block can be
623 	 moved in < "move_ratio" pieces.  The worst case is when the block is
624 	 aligned but has a size of (3 mod 4) (does this happen?) so that the
625 	 last piece requires a byte load/store.  */
626       return (xtensa_uimm8 (v)
627 	      && xtensa_uimm8 (v + MOVE_MAX * LARGEST_MOVE_RATIO));
628 
629     case E_QImode:
630       return xtensa_uimm8 (v);
631 
632     case E_HImode:
633       return xtensa_uimm8x2 (v);
634 
635     case E_DImode:
636     case E_DFmode:
637       return (xtensa_uimm8x4 (v) && xtensa_uimm8x4 (v + 4));
638 
639     default:
640       break;
641     }
642 
643   return xtensa_uimm8x4 (v);
644 }
645 
646 
647 /* Make normal rtx_code into something we can index from an array.  */
648 
649 static enum internal_test
650 map_test_to_internal_test (enum rtx_code test_code)
651 {
652   enum internal_test test = ITEST_MAX;
653 
654   switch (test_code)
655     {
656     default:			break;
657     case EQ:  test = ITEST_EQ;  break;
658     case NE:  test = ITEST_NE;  break;
659     case GT:  test = ITEST_GT;  break;
660     case GE:  test = ITEST_GE;  break;
661     case LT:  test = ITEST_LT;  break;
662     case LE:  test = ITEST_LE;  break;
663     case GTU: test = ITEST_GTU; break;
664     case GEU: test = ITEST_GEU; break;
665     case LTU: test = ITEST_LTU; break;
666     case LEU: test = ITEST_LEU; break;
667     }
668 
669   return test;
670 }
671 
672 
673 /* Generate the code to compare two integer values.  The return value is
674    the comparison expression.  */
675 
676 static rtx
677 gen_int_relational (enum rtx_code test_code, /* relational test (EQ, etc) */
678 		    rtx cmp0, /* first operand to compare */
679 		    rtx cmp1, /* second operand to compare */
680 		    int *p_invert /* whether branch needs to reverse test */)
681 {
682   struct cmp_info
683   {
684     enum rtx_code test_code;	/* test code to use in insn */
685     bool (*const_range_p) (HOST_WIDE_INT); /* range check function */
686     int const_add;		/* constant to add (convert LE -> LT) */
687     int reverse_regs;		/* reverse registers in test */
688     int invert_const;		/* != 0 if invert value if cmp1 is constant */
689     int invert_reg;		/* != 0 if invert value if cmp1 is register */
690     int unsignedp;		/* != 0 for unsigned comparisons.  */
691   };
692 
693   static struct cmp_info info[ (int)ITEST_MAX ] = {
694 
695     { EQ,	xtensa_b4const_or_zero,	0, 0, 0, 0, 0 },	/* EQ  */
696     { NE,	xtensa_b4const_or_zero,	0, 0, 0, 0, 0 },	/* NE  */
697 
698     { LT,	xtensa_b4const_or_zero,	1, 1, 1, 0, 0 },	/* GT  */
699     { GE,	xtensa_b4const_or_zero,	0, 0, 0, 0, 0 },	/* GE  */
700     { LT,	xtensa_b4const_or_zero,	0, 0, 0, 0, 0 },	/* LT  */
701     { GE,	xtensa_b4const_or_zero,	1, 1, 1, 0, 0 },	/* LE  */
702 
703     { LTU,	xtensa_b4constu,	1, 1, 1, 0, 1 },	/* GTU */
704     { GEU,	xtensa_b4constu,	0, 0, 0, 0, 1 },	/* GEU */
705     { LTU,	xtensa_b4constu,	0, 0, 0, 0, 1 },	/* LTU */
706     { GEU,	xtensa_b4constu,	1, 1, 1, 0, 1 },	/* LEU */
707   };
708 
709   enum internal_test test;
710   machine_mode mode;
711   struct cmp_info *p_info;
712 
713   test = map_test_to_internal_test (test_code);
714   gcc_assert (test != ITEST_MAX);
715 
716   p_info = &info[ (int)test ];
717 
718   mode = GET_MODE (cmp0);
719   if (mode == VOIDmode)
720     mode = GET_MODE (cmp1);
721 
722   /* Make sure we can handle any constants given to us.  */
723   if (GET_CODE (cmp1) == CONST_INT)
724     {
725       HOST_WIDE_INT value = INTVAL (cmp1);
726       unsigned HOST_WIDE_INT uvalue = (unsigned HOST_WIDE_INT)value;
727 
728       /* if the immediate overflows or does not fit in the immediate field,
729 	 spill it to a register */
730 
731       if ((p_info->unsignedp ?
732 	   (uvalue + p_info->const_add > uvalue) :
733 	   (value + p_info->const_add > value)) != (p_info->const_add > 0))
734 	{
735 	  cmp1 = force_reg (mode, cmp1);
736 	}
737       else if (!(p_info->const_range_p) (value + p_info->const_add))
738 	{
739 	  cmp1 = force_reg (mode, cmp1);
740 	}
741     }
742   else if ((GET_CODE (cmp1) != REG) && (GET_CODE (cmp1) != SUBREG))
743     {
744       cmp1 = force_reg (mode, cmp1);
745     }
746 
747   /* See if we need to invert the result.  */
748   *p_invert = ((GET_CODE (cmp1) == CONST_INT)
749 	       ? p_info->invert_const
750 	       : p_info->invert_reg);
751 
752   /* Comparison to constants, may involve adding 1 to change a LT into LE.
753      Comparison between two registers, may involve switching operands.  */
754   if (GET_CODE (cmp1) == CONST_INT)
755     {
756       if (p_info->const_add != 0)
757 	cmp1 = GEN_INT (INTVAL (cmp1) + p_info->const_add);
758 
759     }
760   else if (p_info->reverse_regs)
761     {
762       rtx temp = cmp0;
763       cmp0 = cmp1;
764       cmp1 = temp;
765     }
766 
767   return gen_rtx_fmt_ee (p_info->test_code, VOIDmode, cmp0, cmp1);
768 }
769 
770 
771 /* Generate the code to compare two float values.  The return value is
772    the comparison expression.  */
773 
774 static rtx
775 gen_float_relational (enum rtx_code test_code, /* relational test (EQ, etc) */
776 		      rtx cmp0, /* first operand to compare */
777 		      rtx cmp1 /* second operand to compare */)
778 {
779   rtx (*gen_fn) (rtx, rtx, rtx);
780   rtx brtmp;
781   int reverse_regs, invert;
782 
783   switch (test_code)
784     {
785     case EQ: reverse_regs = 0; invert = 0; gen_fn = gen_seq_sf; break;
786     case NE: reverse_regs = 0; invert = 1; gen_fn = gen_seq_sf; break;
787     case LE: reverse_regs = 0; invert = 0; gen_fn = gen_sle_sf; break;
788     case GT: reverse_regs = 1; invert = 0; gen_fn = gen_slt_sf; break;
789     case LT: reverse_regs = 0; invert = 0; gen_fn = gen_slt_sf; break;
790     case GE: reverse_regs = 1; invert = 0; gen_fn = gen_sle_sf; break;
791     case UNEQ: reverse_regs = 0; invert = 0; gen_fn = gen_suneq_sf; break;
792     case LTGT: reverse_regs = 0; invert = 1; gen_fn = gen_suneq_sf; break;
793     case UNLE: reverse_regs = 0; invert = 0; gen_fn = gen_sunle_sf; break;
794     case UNGT: reverse_regs = 1; invert = 0; gen_fn = gen_sunlt_sf; break;
795     case UNLT: reverse_regs = 0; invert = 0; gen_fn = gen_sunlt_sf; break;
796     case UNGE: reverse_regs = 1; invert = 0; gen_fn = gen_sunle_sf; break;
797     case UNORDERED:
798       reverse_regs = 0; invert = 0; gen_fn = gen_sunordered_sf; break;
799     case ORDERED:
800       reverse_regs = 0; invert = 1; gen_fn = gen_sunordered_sf; break;
801     default:
802       fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode, cmp0, cmp1));
803       reverse_regs = 0; invert = 0; gen_fn = 0; /* avoid compiler warnings */
804     }
805 
806   if (reverse_regs)
807     {
808       rtx temp = cmp0;
809       cmp0 = cmp1;
810       cmp1 = temp;
811     }
812 
813   brtmp = gen_rtx_REG (CCmode, FPCC_REGNUM);
814   emit_insn (gen_fn (brtmp, cmp0, cmp1));
815 
816   return gen_rtx_fmt_ee (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
817 }
818 
819 
820 void
821 xtensa_expand_conditional_branch (rtx *operands, machine_mode mode)
822 {
823   enum rtx_code test_code = GET_CODE (operands[0]);
824   rtx cmp0 = operands[1];
825   rtx cmp1 = operands[2];
826   rtx cmp;
827   int invert;
828   rtx label1, label2;
829 
830   switch (mode)
831     {
832     case E_DFmode:
833     default:
834       fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode, cmp0, cmp1));
835 
836     case E_SImode:
837       invert = FALSE;
838       cmp = gen_int_relational (test_code, cmp0, cmp1, &invert);
839       break;
840 
841     case E_SFmode:
842       if (!TARGET_HARD_FLOAT)
843 	fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode,
844 						cmp0, cmp1));
845       invert = FALSE;
846       cmp = gen_float_relational (test_code, cmp0, cmp1);
847       break;
848     }
849 
850   /* Generate the branch.  */
851 
852   label1 = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
853   label2 = pc_rtx;
854 
855   if (invert)
856     {
857       label2 = label1;
858       label1 = pc_rtx;
859     }
860 
861   emit_jump_insn (gen_rtx_SET (pc_rtx,
862 			       gen_rtx_IF_THEN_ELSE (VOIDmode, cmp,
863 						     label1,
864 						     label2)));
865 }
866 
867 
868 static rtx
869 gen_conditional_move (enum rtx_code code, machine_mode mode,
870 		      rtx op0, rtx op1)
871 {
872   if (mode == SImode)
873     {
874       rtx cmp;
875 
876       /* Jump optimization calls get_condition() which canonicalizes
877 	 comparisons like (GE x <const>) to (GT x <const-1>).
878 	 Transform those comparisons back to GE, since that is the
879 	 comparison supported in Xtensa.  We shouldn't have to
880 	 transform <LE x const> comparisons, because neither
881 	 xtensa_expand_conditional_branch() nor get_condition() will
882 	 produce them.  */
883 
884       if ((code == GT) && (op1 == constm1_rtx))
885 	{
886 	  code = GE;
887 	  op1 = const0_rtx;
888 	}
889       cmp = gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx);
890 
891       if (boolean_operator (cmp, VOIDmode))
892 	{
893 	  /* Swap the operands to make const0 second.  */
894 	  if (op0 == const0_rtx)
895 	    {
896 	      op0 = op1;
897 	      op1 = const0_rtx;
898 	    }
899 
900 	  /* If not comparing against zero, emit a comparison (subtract).  */
901 	  if (op1 != const0_rtx)
902 	    {
903 	      op0 = expand_binop (SImode, sub_optab, op0, op1,
904 				  0, 0, OPTAB_LIB_WIDEN);
905 	      op1 = const0_rtx;
906 	    }
907 	}
908       else if (branch_operator (cmp, VOIDmode))
909 	{
910 	  /* Swap the operands to make const0 second.  */
911 	  if (op0 == const0_rtx)
912 	    {
913 	      op0 = op1;
914 	      op1 = const0_rtx;
915 
916 	      switch (code)
917 		{
918 		case LT: code = GE; break;
919 		case GE: code = LT; break;
920 		default: gcc_unreachable ();
921 		}
922 	    }
923 
924 	  if (op1 != const0_rtx)
925 	    return 0;
926 	}
927       else
928 	return 0;
929 
930       return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
931     }
932 
933   if (TARGET_HARD_FLOAT && mode == SFmode)
934     return gen_float_relational (code, op0, op1);
935 
936   return 0;
937 }
938 
939 
940 int
941 xtensa_expand_conditional_move (rtx *operands, int isflt)
942 {
943   rtx dest = operands[0];
944   rtx cmp = operands[1];
945   machine_mode cmp_mode = GET_MODE (XEXP (cmp, 0));
946   rtx (*gen_fn) (rtx, rtx, rtx, rtx, rtx);
947 
948   if (!(cmp = gen_conditional_move (GET_CODE (cmp), cmp_mode,
949 				    XEXP (cmp, 0), XEXP (cmp, 1))))
950     return 0;
951 
952   if (isflt)
953     gen_fn = (cmp_mode == SImode
954 	      ? gen_movsfcc_internal0
955 	      : gen_movsfcc_internal1);
956   else
957     gen_fn = (cmp_mode == SImode
958 	      ? gen_movsicc_internal0
959 	      : gen_movsicc_internal1);
960 
961   emit_insn (gen_fn (dest, XEXP (cmp, 0), operands[2], operands[3], cmp));
962   return 1;
963 }
964 
965 
966 int
967 xtensa_expand_scc (rtx operands[4], machine_mode cmp_mode)
968 {
969   rtx dest = operands[0];
970   rtx cmp;
971   rtx one_tmp, zero_tmp;
972   rtx (*gen_fn) (rtx, rtx, rtx, rtx, rtx);
973 
974   if (!(cmp = gen_conditional_move (GET_CODE (operands[1]), cmp_mode,
975 				    operands[2], operands[3])))
976     return 0;
977 
978   one_tmp = gen_reg_rtx (SImode);
979   zero_tmp = gen_reg_rtx (SImode);
980   emit_insn (gen_movsi (one_tmp, const_true_rtx));
981   emit_insn (gen_movsi (zero_tmp, const0_rtx));
982 
983   gen_fn = (cmp_mode == SImode
984 	    ? gen_movsicc_internal0
985 	    : gen_movsicc_internal1);
986   emit_insn (gen_fn (dest, XEXP (cmp, 0), one_tmp, zero_tmp, cmp));
987   return 1;
988 }
989 
990 
991 /* Split OP[1] into OP[2,3] and likewise for OP[0] into OP[0,1].  MODE is
992    for the output, i.e., the input operands are twice as big as MODE.  */
993 
994 void
995 xtensa_split_operand_pair (rtx operands[4], machine_mode mode)
996 {
997   switch (GET_CODE (operands[1]))
998     {
999     case REG:
1000       operands[3] = gen_rtx_REG (mode, REGNO (operands[1]) + 1);
1001       operands[2] = gen_rtx_REG (mode, REGNO (operands[1]));
1002       break;
1003 
1004     case MEM:
1005       operands[3] = adjust_address (operands[1], mode, GET_MODE_SIZE (mode));
1006       operands[2] = adjust_address (operands[1], mode, 0);
1007       break;
1008 
1009     case CONST_INT:
1010     case CONST_DOUBLE:
1011       split_double (operands[1], &operands[2], &operands[3]);
1012       break;
1013 
1014     default:
1015       gcc_unreachable ();
1016     }
1017 
1018   switch (GET_CODE (operands[0]))
1019     {
1020     case REG:
1021       operands[1] = gen_rtx_REG (mode, REGNO (operands[0]) + 1);
1022       operands[0] = gen_rtx_REG (mode, REGNO (operands[0]));
1023       break;
1024 
1025     case MEM:
1026       operands[1] = adjust_address (operands[0], mode, GET_MODE_SIZE (mode));
1027       operands[0] = adjust_address (operands[0], mode, 0);
1028       break;
1029 
1030     default:
1031       gcc_unreachable ();
1032     }
1033 }
1034 
1035 
1036 /* Emit insns to move operands[1] into operands[0].
1037    Return 1 if we have written out everything that needs to be done to
1038    do the move.  Otherwise, return 0 and the caller will emit the move
1039    normally.  */
1040 
1041 int
1042 xtensa_emit_move_sequence (rtx *operands, machine_mode mode)
1043 {
1044   rtx src = operands[1];
1045 
1046   if (CONSTANT_P (src)
1047       && (GET_CODE (src) != CONST_INT || ! xtensa_simm12b (INTVAL (src))))
1048     {
1049       rtx dst = operands[0];
1050 
1051       if (xtensa_tls_referenced_p (src))
1052 	{
1053 	  rtx addend = NULL;
1054 
1055 	  if (GET_CODE (src) == CONST && GET_CODE (XEXP (src, 0)) == PLUS)
1056 	    {
1057 	      addend = XEXP (XEXP (src, 0), 1);
1058 	      src = XEXP (XEXP (src, 0), 0);
1059 	    }
1060 
1061 	  src = xtensa_legitimize_tls_address (src);
1062 	  if (addend)
1063 	    {
1064 	      src = gen_rtx_PLUS (mode, src, addend);
1065 	      src = force_operand (src, dst);
1066 	    }
1067 	  emit_move_insn (dst, src);
1068 	  return 1;
1069 	}
1070 
1071       if (! TARGET_AUTO_LITPOOLS && ! TARGET_CONST16)
1072 	{
1073 	  src = force_const_mem (SImode, src);
1074 	  operands[1] = src;
1075 	}
1076 
1077       /* PC-relative loads are always SImode, and CONST16 is only
1078 	 supported in the movsi pattern, so add a SUBREG for any other
1079 	 (smaller) mode.  */
1080 
1081       if (mode != SImode)
1082 	{
1083 	  if (register_operand (dst, mode))
1084 	    {
1085 	      emit_move_insn (simplify_gen_subreg (SImode, dst, mode, 0), src);
1086 	      return 1;
1087 	    }
1088 	  else
1089 	    {
1090 	      src = force_reg (SImode, src);
1091 	      src = gen_lowpart_SUBREG (mode, src);
1092 	      operands[1] = src;
1093 	    }
1094 	}
1095     }
1096 
1097   if (!(reload_in_progress | reload_completed)
1098       && !xtensa_valid_move (mode, operands))
1099     operands[1] = force_reg (mode, operands[1]);
1100 
1101   operands[1] = xtensa_copy_incoming_a7 (operands[1]);
1102 
1103   /* During reload we don't want to emit (subreg:X (mem:Y)) since that
1104      instruction won't be recognized after reload, so we remove the
1105      subreg and adjust mem accordingly.  */
1106   if (reload_in_progress)
1107     {
1108       operands[0] = fixup_subreg_mem (operands[0]);
1109       operands[1] = fixup_subreg_mem (operands[1]);
1110     }
1111   return 0;
1112 }
1113 
1114 
1115 static rtx
1116 fixup_subreg_mem (rtx x)
1117 {
1118   if (GET_CODE (x) == SUBREG
1119       && GET_CODE (SUBREG_REG (x)) == REG
1120       && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
1121     {
1122       rtx temp =
1123 	gen_rtx_SUBREG (GET_MODE (x),
1124 			reg_equiv_mem (REGNO (SUBREG_REG (x))),
1125 			SUBREG_BYTE (x));
1126       x = alter_subreg (&temp, true);
1127     }
1128   return x;
1129 }
1130 
1131 
1132 /* Check if an incoming argument in a7 is expected to be used soon and
1133    if OPND is a register or register pair that includes a7.  If so,
1134    create a new pseudo and copy a7 into that pseudo at the very
1135    beginning of the function, followed by the special "set_frame_ptr"
1136    unspec_volatile insn.  The return value is either the original
1137    operand, if it is not a7, or the new pseudo containing a copy of
1138    the incoming argument.  This is necessary because the register
1139    allocator will ignore conflicts with a7 and may either assign some
1140    other pseudo to a7 or use a7 as the hard_frame_pointer, clobbering
1141    the incoming argument in a7.  By copying the argument out of a7 as
1142    the very first thing, and then immediately following that with an
1143    unspec_volatile to keep the scheduler away, we should avoid any
1144    problems.  Putting the set_frame_ptr insn at the beginning, with
1145    only the a7 copy before it, also makes it easier for the prologue
1146    expander to initialize the frame pointer after the a7 copy and to
1147    fix up the a7 copy to use the stack pointer instead of the frame
1148    pointer.  */
1149 
1150 rtx
1151 xtensa_copy_incoming_a7 (rtx opnd)
1152 {
1153   rtx entry_insns = 0;
1154   rtx reg, tmp;
1155   machine_mode mode;
1156 
1157   if (!cfun->machine->need_a7_copy)
1158     return opnd;
1159 
1160   /* This function should never be called again once a7 has been copied.  */
1161   gcc_assert (!cfun->machine->set_frame_ptr_insn);
1162 
1163   mode = GET_MODE (opnd);
1164 
1165   /* The operand using a7 may come in a later instruction, so just return
1166      the original operand if it doesn't use a7.  */
1167   reg = opnd;
1168   if (GET_CODE (reg) == SUBREG)
1169     {
1170       gcc_assert (SUBREG_BYTE (reg) == 0);
1171       reg = SUBREG_REG (reg);
1172     }
1173   if (GET_CODE (reg) != REG
1174       || REGNO (reg) > A7_REG
1175       || REGNO (reg) + hard_regno_nregs (A7_REG, mode) <= A7_REG)
1176     return opnd;
1177 
1178   /* 1-word args will always be in a7; 2-word args in a6/a7.  */
1179   gcc_assert (REGNO (reg) + hard_regno_nregs (A7_REG, mode) - 1 == A7_REG);
1180 
1181   cfun->machine->need_a7_copy = false;
1182 
1183   /* Copy a7 to a new pseudo at the function entry.  Use gen_raw_REG to
1184      create the REG for a7 so that hard_frame_pointer_rtx is not used.  */
1185 
1186   start_sequence ();
1187   tmp = gen_reg_rtx (mode);
1188 
1189   switch (mode)
1190     {
1191     case E_DFmode:
1192     case E_DImode:
1193       /* Copy the value out of A7 here but keep the first word in A6 until
1194 	 after the set_frame_ptr insn.  Otherwise, the register allocator
1195 	 may decide to put "subreg (tmp, 0)" in A7 and clobber the incoming
1196 	 value.  */
1197       emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 4),
1198 				     gen_raw_REG (SImode, A7_REG)));
1199       break;
1200     case E_SFmode:
1201       emit_insn (gen_movsf_internal (tmp, gen_raw_REG (mode, A7_REG)));
1202       break;
1203     case E_SImode:
1204       emit_insn (gen_movsi_internal (tmp, gen_raw_REG (mode, A7_REG)));
1205       break;
1206     case E_HImode:
1207       emit_insn (gen_movhi_internal (tmp, gen_raw_REG (mode, A7_REG)));
1208       break;
1209     case E_QImode:
1210       emit_insn (gen_movqi_internal (tmp, gen_raw_REG (mode, A7_REG)));
1211       break;
1212     default:
1213       gcc_unreachable ();
1214     }
1215 
1216   cfun->machine->set_frame_ptr_insn = emit_insn (gen_set_frame_ptr ());
1217 
1218   /* For DF and DI mode arguments, copy the incoming value in A6 now.  */
1219   if (mode == DFmode || mode == DImode)
1220     emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 0),
1221 				   gen_rtx_REG (SImode, A7_REG - 1)));
1222   entry_insns = get_insns ();
1223   end_sequence ();
1224 
1225   if (cfun->machine->vararg_a7)
1226     {
1227       /* This is called from within builtin_saveregs, which will insert the
1228 	 saveregs code at the function entry, ahead of anything placed at
1229 	 the function entry now.  Instead, save the sequence to be inserted
1230 	 at the beginning of the saveregs code.  */
1231       cfun->machine->vararg_a7_copy = entry_insns;
1232     }
1233   else
1234     {
1235       /* Put entry_insns after the NOTE that starts the function.  If
1236 	 this is inside a start_sequence, make the outer-level insn
1237 	 chain current, so the code is placed at the start of the
1238 	 function.  */
1239       push_topmost_sequence ();
1240       /* Do not use entry_of_function() here.  This is called from within
1241 	 expand_function_start, when the CFG still holds GIMPLE.  */
1242       emit_insn_after (entry_insns, get_insns ());
1243       pop_topmost_sequence ();
1244     }
1245 
1246   return tmp;
1247 }
1248 
1249 
1250 /* Try to expand a block move operation to a sequence of RTL move
1251    instructions.  If not optimizing, or if the block size is not a
1252    constant, or if the block is too large, the expansion fails and GCC
1253    falls back to calling memcpy().
1254 
1255    operands[0] is the destination
1256    operands[1] is the source
1257    operands[2] is the length
1258    operands[3] is the alignment */
1259 
1260 int
1261 xtensa_expand_block_move (rtx *operands)
1262 {
1263   static const machine_mode mode_from_align[] =
1264   {
1265     VOIDmode, QImode, HImode, VOIDmode, SImode,
1266   };
1267 
1268   rtx dst_mem = operands[0];
1269   rtx src_mem = operands[1];
1270   HOST_WIDE_INT bytes, align;
1271   int num_pieces, move_ratio;
1272   rtx temp[2];
1273   machine_mode mode[2];
1274   int amount[2];
1275   bool active[2];
1276   int phase = 0;
1277   int next;
1278   int offset_ld = 0;
1279   int offset_st = 0;
1280   rtx x;
1281 
1282   /* If this is not a fixed size move, just call memcpy.  */
1283   if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
1284     return 0;
1285 
1286   bytes = INTVAL (operands[2]);
1287   align = INTVAL (operands[3]);
1288 
1289   /* Anything to move?  */
1290   if (bytes <= 0)
1291     return 0;
1292 
1293   if (align > MOVE_MAX)
1294     align = MOVE_MAX;
1295 
1296   /* Decide whether to expand inline based on the optimization level.  */
1297   move_ratio = 4;
1298   if (optimize > 2)
1299     move_ratio = LARGEST_MOVE_RATIO;
1300   num_pieces = (bytes / align) + (bytes % align); /* Close enough anyway.  */
1301   if (num_pieces > move_ratio)
1302     return 0;
1303 
1304   x = XEXP (dst_mem, 0);
1305   if (!REG_P (x))
1306     {
1307       x = force_reg (Pmode, x);
1308       dst_mem = replace_equiv_address (dst_mem, x);
1309     }
1310 
1311   x = XEXP (src_mem, 0);
1312   if (!REG_P (x))
1313     {
1314       x = force_reg (Pmode, x);
1315       src_mem = replace_equiv_address (src_mem, x);
1316     }
1317 
1318   active[0] = active[1] = false;
1319 
1320   do
1321     {
1322       next = phase;
1323       phase ^= 1;
1324 
1325       if (bytes > 0)
1326 	{
1327 	  int next_amount;
1328 
1329 	  next_amount = (bytes >= 4 ? 4 : (bytes >= 2 ? 2 : 1));
1330 	  next_amount = MIN (next_amount, align);
1331 
1332 	  amount[next] = next_amount;
1333 	  mode[next] = mode_from_align[next_amount];
1334 	  temp[next] = gen_reg_rtx (mode[next]);
1335 
1336 	  x = adjust_address (src_mem, mode[next], offset_ld);
1337 	  emit_insn (gen_rtx_SET (temp[next], x));
1338 
1339 	  offset_ld += next_amount;
1340 	  bytes -= next_amount;
1341 	  active[next] = true;
1342 	}
1343 
1344       if (active[phase])
1345 	{
1346 	  active[phase] = false;
1347 
1348 	  x = adjust_address (dst_mem, mode[phase], offset_st);
1349 	  emit_insn (gen_rtx_SET (x, temp[phase]));
1350 
1351 	  offset_st += amount[phase];
1352 	}
1353     }
1354   while (active[next]);
1355 
1356   return 1;
1357 }
1358 
1359 
1360 void
1361 xtensa_expand_nonlocal_goto (rtx *operands)
1362 {
1363   rtx goto_handler = operands[1];
1364   rtx containing_fp = operands[3];
1365 
1366   /* Generate a call to "__xtensa_nonlocal_goto" (in libgcc); the code
1367      is too big to generate in-line.  */
1368 
1369   if (GET_CODE (containing_fp) != REG)
1370     containing_fp = force_reg (Pmode, containing_fp);
1371 
1372   emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_nonlocal_goto"),
1373 		     LCT_NORMAL, VOIDmode,
1374 		     containing_fp, Pmode,
1375 		     goto_handler, Pmode);
1376 }
1377 
1378 
1379 static struct machine_function *
1380 xtensa_init_machine_status (void)
1381 {
1382   return ggc_cleared_alloc<machine_function> ();
1383 }
1384 
1385 
1386 /* Shift VAL of mode MODE left by COUNT bits.  */
1387 
1388 static inline rtx
1389 xtensa_expand_mask_and_shift (rtx val, machine_mode mode, rtx count)
1390 {
1391   val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
1392 			     NULL_RTX, 1, OPTAB_DIRECT);
1393   return expand_simple_binop (SImode, ASHIFT, val, count,
1394 			      NULL_RTX, 1, OPTAB_DIRECT);
1395 }
1396 
1397 
1398 /* Structure to hold the initial parameters for a compare_and_swap operation
1399    in HImode and QImode.  */
1400 
1401 struct alignment_context
1402 {
1403   rtx memsi;	  /* SI aligned memory location.  */
1404   rtx shift;	  /* Bit offset with regard to lsb.  */
1405   rtx modemask;	  /* Mask of the HQImode shifted by SHIFT bits.  */
1406   rtx modemaski;  /* ~modemask */
1407 };
1408 
1409 
1410 /* Initialize structure AC for word access to HI and QI mode memory.  */
1411 
1412 static void
1413 init_alignment_context (struct alignment_context *ac, rtx mem)
1414 {
1415   machine_mode mode = GET_MODE (mem);
1416   rtx byteoffset = NULL_RTX;
1417   bool aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));
1418 
1419   if (aligned)
1420     ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned.  */
1421   else
1422     {
1423       /* Alignment is unknown.  */
1424       rtx addr, align;
1425 
1426       /* Force the address into a register.  */
1427       addr = force_reg (Pmode, XEXP (mem, 0));
1428 
1429       /* Align it to SImode.  */
1430       align = expand_simple_binop (Pmode, AND, addr,
1431 				   GEN_INT (-GET_MODE_SIZE (SImode)),
1432 				   NULL_RTX, 1, OPTAB_DIRECT);
1433       /* Generate MEM.  */
1434       ac->memsi = gen_rtx_MEM (SImode, align);
1435       MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
1436       set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
1437       set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));
1438 
1439       byteoffset = expand_simple_binop (Pmode, AND, addr,
1440 					GEN_INT (GET_MODE_SIZE (SImode) - 1),
1441 					NULL_RTX, 1, OPTAB_DIRECT);
1442     }
1443 
1444   /* Calculate shiftcount.  */
1445   if (TARGET_BIG_ENDIAN)
1446     {
1447       ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
1448       if (!aligned)
1449 	ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
1450 					 NULL_RTX, 1, OPTAB_DIRECT);
1451     }
1452   else
1453     {
1454       if (aligned)
1455 	ac->shift = NULL_RTX;
1456       else
1457 	ac->shift = byteoffset;
1458     }
1459 
1460   if (ac->shift != NULL_RTX)
1461     {
1462       /* Shift is the byte count, but we need the bitcount.  */
1463       gcc_assert (exact_log2 (BITS_PER_UNIT) >= 0);
1464       ac->shift = expand_simple_binop (SImode, ASHIFT, ac->shift,
1465 				       GEN_INT (exact_log2 (BITS_PER_UNIT)),
1466 				       NULL_RTX, 1, OPTAB_DIRECT);
1467       ac->modemask = expand_simple_binop (SImode, ASHIFT,
1468 					  GEN_INT (GET_MODE_MASK (mode)),
1469 					  ac->shift,
1470 					  NULL_RTX, 1, OPTAB_DIRECT);
1471     }
1472   else
1473     ac->modemask = GEN_INT (GET_MODE_MASK (mode));
1474 
1475   ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
1476 }
1477 
1478 
1479 /* Expand an atomic compare and swap operation for HImode and QImode.
1480    MEM is the memory location, CMP the old value to compare MEM with
1481    and NEW_RTX the value to set if CMP == MEM.  */
1482 
1483 void
1484 xtensa_expand_compare_and_swap (rtx target, rtx mem, rtx cmp, rtx new_rtx)
1485 {
1486   machine_mode mode = GET_MODE (mem);
1487   struct alignment_context ac;
1488   rtx tmp, cmpv, newv, val;
1489   rtx oldval = gen_reg_rtx (SImode);
1490   rtx res = gen_reg_rtx (SImode);
1491   rtx_code_label *csloop = gen_label_rtx ();
1492   rtx_code_label *csend = gen_label_rtx ();
1493 
1494   init_alignment_context (&ac, mem);
1495 
1496   if (ac.shift != NULL_RTX)
1497     {
1498       cmp = xtensa_expand_mask_and_shift (cmp, mode, ac.shift);
1499       new_rtx = xtensa_expand_mask_and_shift (new_rtx, mode, ac.shift);
1500     }
1501 
1502   /* Load the surrounding word into VAL with the MEM value masked out.  */
1503   val = force_reg (SImode, expand_simple_binop (SImode, AND, ac.memsi,
1504 						ac.modemaski, NULL_RTX, 1,
1505 						OPTAB_DIRECT));
1506   emit_label (csloop);
1507 
1508   /* Patch CMP and NEW_RTX into VAL at correct position.  */
1509   cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
1510 						 NULL_RTX, 1, OPTAB_DIRECT));
1511   newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
1512 						 NULL_RTX, 1, OPTAB_DIRECT));
1513 
1514   /* Jump to end if we're done.  */
1515   emit_insn (gen_sync_compare_and_swapsi (res, ac.memsi, cmpv, newv));
1516   emit_cmp_and_jump_insns (res, cmpv, EQ, const0_rtx, SImode, true, csend);
1517 
1518   /* Check for changes outside mode.  */
1519   emit_move_insn (oldval, val);
1520   tmp = expand_simple_binop (SImode, AND, res, ac.modemaski,
1521 			     val, 1, OPTAB_DIRECT);
1522   if (tmp != val)
1523     emit_move_insn (val, tmp);
1524 
1525   /* Loop internal if so.  */
1526   emit_cmp_and_jump_insns (oldval, val, NE, const0_rtx, SImode, true, csloop);
1527 
1528   emit_label (csend);
1529 
1530   /* Return the correct part of the bitfield.  */
1531   convert_move (target,
1532 		(ac.shift == NULL_RTX ? res
1533 		 : expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
1534 					NULL_RTX, 1, OPTAB_DIRECT)),
1535 		1);
1536 }
1537 
1538 
1539 /* Expand an atomic operation CODE of mode MODE (either HImode or QImode --
1540    the default expansion works fine for SImode).  MEM is the memory location
1541    and VAL the value to play with.  If AFTER is true then store the value
1542    MEM holds after the operation, if AFTER is false then store the value MEM
1543    holds before the operation.  If TARGET is zero then discard that value, else
1544    store it to TARGET.  */
1545 
1546 void
1547 xtensa_expand_atomic (enum rtx_code code, rtx target, rtx mem, rtx val,
1548 		      bool after)
1549 {
1550   machine_mode mode = GET_MODE (mem);
1551   struct alignment_context ac;
1552   rtx_code_label *csloop = gen_label_rtx ();
1553   rtx cmp, tmp;
1554   rtx old = gen_reg_rtx (SImode);
1555   rtx new_rtx = gen_reg_rtx (SImode);
1556   rtx orig = NULL_RTX;
1557 
1558   init_alignment_context (&ac, mem);
1559 
1560   /* Prepare values before the compare-and-swap loop.  */
1561   if (ac.shift != NULL_RTX)
1562     val = xtensa_expand_mask_and_shift (val, mode, ac.shift);
1563   switch (code)
1564     {
1565     case PLUS:
1566     case MINUS:
1567       orig = gen_reg_rtx (SImode);
1568       convert_move (orig, val, 1);
1569       break;
1570 
1571     case SET:
1572     case IOR:
1573     case XOR:
1574       break;
1575 
1576     case MULT: /* NAND */
1577     case AND:
1578       /* val = "11..1<val>11..1" */
1579       val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
1580 				 NULL_RTX, 1, OPTAB_DIRECT);
1581       break;
1582 
1583     default:
1584       gcc_unreachable ();
1585     }
1586 
1587   /* Load full word.  Subsequent loads are performed by S32C1I.  */
1588   cmp = force_reg (SImode, ac.memsi);
1589 
1590   emit_label (csloop);
1591   emit_move_insn (old, cmp);
1592 
1593   switch (code)
1594     {
1595     case PLUS:
1596     case MINUS:
1597       val = expand_simple_binop (SImode, code, old, orig,
1598 				 NULL_RTX, 1, OPTAB_DIRECT);
1599       val = expand_simple_binop (SImode, AND, val, ac.modemask,
1600 				 NULL_RTX, 1, OPTAB_DIRECT);
1601       /* FALLTHRU */
1602     case SET:
1603       tmp = expand_simple_binop (SImode, AND, old, ac.modemaski,
1604 				 NULL_RTX, 1, OPTAB_DIRECT);
1605       tmp = expand_simple_binop (SImode, IOR, tmp, val,
1606 				 new_rtx, 1, OPTAB_DIRECT);
1607       break;
1608 
1609     case AND:
1610     case IOR:
1611     case XOR:
1612       tmp = expand_simple_binop (SImode, code, old, val,
1613 				 new_rtx, 1, OPTAB_DIRECT);
1614       break;
1615 
1616     case MULT: /* NAND */
1617       tmp = expand_simple_binop (SImode, AND, old, val,
1618 				 NULL_RTX, 1, OPTAB_DIRECT);
1619       tmp = expand_simple_binop (SImode, XOR, tmp, ac.modemask,
1620 				 new_rtx, 1, OPTAB_DIRECT);
1621       break;
1622 
1623     default:
1624       gcc_unreachable ();
1625     }
1626 
1627   if (tmp != new_rtx)
1628     emit_move_insn (new_rtx, tmp);
1629   emit_insn (gen_sync_compare_and_swapsi (cmp, ac.memsi, old, new_rtx));
1630   emit_cmp_and_jump_insns (cmp, old, NE, const0_rtx, SImode, true, csloop);
1631 
1632   if (target)
1633     {
1634       tmp = (after ? new_rtx : cmp);
1635       convert_move (target,
1636 		    (ac.shift == NULL_RTX ? tmp
1637 		     : expand_simple_binop (SImode, LSHIFTRT, tmp, ac.shift,
1638 					    NULL_RTX, 1, OPTAB_DIRECT)),
1639 		    1);
1640     }
1641 }
1642 
1643 
1644 void
1645 xtensa_setup_frame_addresses (void)
1646 {
1647   /* Set flag to cause TARGET_FRAME_POINTER_REQUIRED to return true.  */
1648   cfun->machine->accesses_prev_frame = 1;
1649 
1650   if (TARGET_WINDOWED_ABI)
1651     emit_library_call
1652       (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_libgcc_window_spill"),
1653        LCT_NORMAL, VOIDmode);
1654 }
1655 
1656 
1657 /* Emit the assembly for the end of a zero-cost loop.  Normally we just emit
1658    a comment showing where the end of the loop is.  However, if there is a
1659    label or a branch at the end of the loop then we need to place a nop
1660    there.  If the loop ends with a label we need the nop so that branches
1661    targeting that label will target the nop (and thus remain in the loop),
1662    instead of targeting the instruction after the loop (and thus exiting
1663    the loop).  If the loop ends with a branch, we need the nop in case the
1664    branch is targeting a location inside the loop.  When the branch
1665    executes it will cause the loop count to be decremented even if it is
1666    taken (because it is the last instruction in the loop), so we need to
1667    nop after the branch to prevent the loop count from being decremented
1668    when the branch is taken.  */
1669 
1670 void
1671 xtensa_emit_loop_end (rtx_insn *insn, rtx *operands)
1672 {
1673   char done = 0;
1674 
1675   for (insn = PREV_INSN (insn); insn && !done; insn = PREV_INSN (insn))
1676     {
1677       switch (GET_CODE (insn))
1678 	{
1679 	case NOTE:
1680 	case BARRIER:
1681 	  break;
1682 
1683 	case CODE_LABEL:
1684 	  output_asm_insn (TARGET_DENSITY ? "nop.n" : "nop", operands);
1685 	  done = 1;
1686 	  break;
1687 
1688 	default:
1689 	  {
1690 	    rtx body = PATTERN (insn);
1691 
1692 	    if (JUMP_P (body))
1693 	      {
1694 		output_asm_insn (TARGET_DENSITY ? "nop.n" : "nop", operands);
1695 		done = 1;
1696 	      }
1697 	    else if ((GET_CODE (body) != USE)
1698 		     && (GET_CODE (body) != CLOBBER))
1699 	      done = 1;
1700 	  }
1701 	  break;
1702         }
1703     }
1704 
1705   output_asm_insn ("%1_LEND:", operands);
1706 }
1707 
1708 
1709 char *
1710 xtensa_emit_branch (bool inverted, bool immed, rtx *operands)
1711 {
1712   static char result[64];
1713   enum rtx_code code;
1714   const char *op;
1715 
1716   code = GET_CODE (operands[3]);
1717   switch (code)
1718     {
1719     case EQ:	op = inverted ? "ne" : "eq"; break;
1720     case NE:	op = inverted ? "eq" : "ne"; break;
1721     case LT:	op = inverted ? "ge" : "lt"; break;
1722     case GE:	op = inverted ? "lt" : "ge"; break;
1723     case LTU:	op = inverted ? "geu" : "ltu"; break;
1724     case GEU:	op = inverted ? "ltu" : "geu"; break;
1725     default:	gcc_unreachable ();
1726     }
1727 
1728   if (immed)
1729     {
1730       if (INTVAL (operands[1]) == 0)
1731 	sprintf (result, "b%sz%s\t%%0, %%2", op,
1732 		 (TARGET_DENSITY && (code == EQ || code == NE)) ? ".n" : "");
1733       else
1734 	sprintf (result, "b%si\t%%0, %%d1, %%2", op);
1735     }
1736   else
1737     sprintf (result, "b%s\t%%0, %%1, %%2", op);
1738 
1739   return result;
1740 }
1741 
1742 
1743 char *
1744 xtensa_emit_bit_branch (bool inverted, bool immed, rtx *operands)
1745 {
1746   static char result[64];
1747   const char *op;
1748 
1749   switch (GET_CODE (operands[3]))
1750     {
1751     case EQ:	op = inverted ? "bs" : "bc"; break;
1752     case NE:	op = inverted ? "bc" : "bs"; break;
1753     default:	gcc_unreachable ();
1754     }
1755 
1756   if (immed)
1757     {
1758       unsigned bitnum = INTVAL (operands[1]) & 0x1f;
1759       operands[1] = GEN_INT (bitnum);
1760       sprintf (result, "b%si\t%%0, %%d1, %%2", op);
1761     }
1762   else
1763     sprintf (result, "b%s\t%%0, %%1, %%2", op);
1764 
1765   return result;
1766 }
1767 
1768 
1769 char *
1770 xtensa_emit_movcc (bool inverted, bool isfp, bool isbool, rtx *operands)
1771 {
1772   static char result[64];
1773   enum rtx_code code;
1774   const char *op;
1775 
1776   code = GET_CODE (operands[4]);
1777   if (isbool)
1778     {
1779       switch (code)
1780 	{
1781 	case EQ:	op = inverted ? "t" : "f"; break;
1782 	case NE:	op = inverted ? "f" : "t"; break;
1783 	default:	gcc_unreachable ();
1784 	}
1785     }
1786   else
1787     {
1788       switch (code)
1789 	{
1790 	case EQ:	op = inverted ? "nez" : "eqz"; break;
1791 	case NE:	op = inverted ? "eqz" : "nez"; break;
1792 	case LT:	op = inverted ? "gez" : "ltz"; break;
1793 	case GE:	op = inverted ? "ltz" : "gez"; break;
1794 	default:	gcc_unreachable ();
1795 	}
1796     }
1797 
1798   sprintf (result, "mov%s%s\t%%0, %%%d, %%1",
1799 	   op, isfp ? ".s" : "", inverted ? 3 : 2);
1800   return result;
1801 }
1802 
1803 
1804 char *
1805 xtensa_emit_call (int callop, rtx *operands)
1806 {
1807   static char result[64];
1808   rtx tgt = operands[callop];
1809 
1810   if (GET_CODE (tgt) == CONST_INT)
1811     sprintf (result, "call%d\t" HOST_WIDE_INT_PRINT_HEX,
1812 	     WINDOW_SIZE, INTVAL (tgt));
1813   else if (register_operand (tgt, VOIDmode))
1814     sprintf (result, "callx%d\t%%%d", WINDOW_SIZE, callop);
1815   else
1816     sprintf (result, "call%d\t%%%d", WINDOW_SIZE, callop);
1817 
1818   return result;
1819 }
1820 
1821 
1822 bool
1823 xtensa_legitimate_address_p (machine_mode mode, rtx addr, bool strict)
1824 {
1825   /* Allow constant pool addresses.  */
1826   if (mode != BLKmode && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
1827       && ! TARGET_CONST16 && constantpool_address_p (addr)
1828       && ! xtensa_tls_referenced_p (addr))
1829     return true;
1830 
1831   while (GET_CODE (addr) == SUBREG)
1832     addr = SUBREG_REG (addr);
1833 
1834   /* Allow base registers.  */
1835   if (GET_CODE (addr) == REG && BASE_REG_P (addr, strict))
1836     return true;
1837 
1838   /* Check for "register + offset" addressing.  */
1839   if (GET_CODE (addr) == PLUS)
1840     {
1841       rtx xplus0 = XEXP (addr, 0);
1842       rtx xplus1 = XEXP (addr, 1);
1843       enum rtx_code code0;
1844       enum rtx_code code1;
1845 
1846       while (GET_CODE (xplus0) == SUBREG)
1847 	xplus0 = SUBREG_REG (xplus0);
1848       code0 = GET_CODE (xplus0);
1849 
1850       while (GET_CODE (xplus1) == SUBREG)
1851 	xplus1 = SUBREG_REG (xplus1);
1852       code1 = GET_CODE (xplus1);
1853 
1854       /* Swap operands if necessary so the register is first.  */
1855       if (code0 != REG && code1 == REG)
1856 	{
1857 	  xplus0 = XEXP (addr, 1);
1858 	  xplus1 = XEXP (addr, 0);
1859 	  code0 = GET_CODE (xplus0);
1860 	  code1 = GET_CODE (xplus1);
1861 	}
1862 
1863       if (code0 == REG && BASE_REG_P (xplus0, strict)
1864 	  && code1 == CONST_INT
1865 	  && xtensa_mem_offset (INTVAL (xplus1), mode))
1866 	return true;
1867     }
1868 
1869   return false;
1870 }
1871 
1872 
1873 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol.  */
1874 
1875 static GTY(()) rtx xtensa_tls_module_base_symbol;
1876 
1877 static rtx
1878 xtensa_tls_module_base (void)
1879 {
1880   if (! xtensa_tls_module_base_symbol)
1881     {
1882       xtensa_tls_module_base_symbol =
1883 	gen_rtx_SYMBOL_REF (Pmode, "_TLS_MODULE_BASE_");
1884       SYMBOL_REF_FLAGS (xtensa_tls_module_base_symbol)
1885         |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
1886     }
1887 
1888   return xtensa_tls_module_base_symbol;
1889 }
1890 
1891 
1892 static rtx_insn *
1893 xtensa_call_tls_desc (rtx sym, rtx *retp)
1894 {
1895   rtx fn, arg, a_io;
1896   rtx_insn *call_insn, *insns;
1897 
1898   start_sequence ();
1899   fn = gen_reg_rtx (Pmode);
1900   arg = gen_reg_rtx (Pmode);
1901   a_io = gen_rtx_REG (Pmode, WINDOW_SIZE + 2);
1902 
1903   emit_insn (gen_tls_func (fn, sym));
1904   emit_insn (gen_tls_arg (arg, sym));
1905   emit_move_insn (a_io, arg);
1906   call_insn = emit_call_insn (gen_tls_call (a_io, fn, sym, const1_rtx));
1907   use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), a_io);
1908   insns = get_insns ();
1909   end_sequence ();
1910 
1911   *retp = a_io;
1912   return insns;
1913 }
1914 
1915 
1916 static rtx
1917 xtensa_legitimize_tls_address (rtx x)
1918 {
1919   unsigned int model = SYMBOL_REF_TLS_MODEL (x);
1920   rtx dest, tp, ret, modbase, base, addend;
1921   rtx_insn *insns;
1922 
1923   dest = gen_reg_rtx (Pmode);
1924   switch (model)
1925     {
1926     case TLS_MODEL_GLOBAL_DYNAMIC:
1927       insns = xtensa_call_tls_desc (x, &ret);
1928       emit_libcall_block (insns, dest, ret, x);
1929       break;
1930 
1931     case TLS_MODEL_LOCAL_DYNAMIC:
1932       base = gen_reg_rtx (Pmode);
1933       modbase = xtensa_tls_module_base ();
1934       insns = xtensa_call_tls_desc (modbase, &ret);
1935       emit_libcall_block (insns, base, ret, modbase);
1936       addend = force_reg (SImode, gen_sym_DTPOFF (x));
1937       emit_insn (gen_addsi3 (dest, base, addend));
1938       break;
1939 
1940     case TLS_MODEL_INITIAL_EXEC:
1941     case TLS_MODEL_LOCAL_EXEC:
1942       tp = gen_reg_rtx (SImode);
1943       emit_insn (gen_get_thread_pointersi (tp));
1944       addend = force_reg (SImode, gen_sym_TPOFF (x));
1945       emit_insn (gen_addsi3 (dest, tp, addend));
1946       break;
1947 
1948     default:
1949       gcc_unreachable ();
1950     }
1951 
1952   return dest;
1953 }
1954 
1955 
1956 rtx
1957 xtensa_legitimize_address (rtx x,
1958 			   rtx oldx ATTRIBUTE_UNUSED,
1959 			   machine_mode mode)
1960 {
1961   if (xtensa_tls_symbol_p (x))
1962     return xtensa_legitimize_tls_address (x);
1963 
1964   if (GET_CODE (x) == PLUS)
1965     {
1966       rtx plus0 = XEXP (x, 0);
1967       rtx plus1 = XEXP (x, 1);
1968 
1969       if (GET_CODE (plus0) != REG && GET_CODE (plus1) == REG)
1970 	{
1971 	  plus0 = XEXP (x, 1);
1972 	  plus1 = XEXP (x, 0);
1973 	}
1974 
1975       /* Try to split up the offset to use an ADDMI instruction.  */
1976       if (GET_CODE (plus0) == REG
1977 	  && GET_CODE (plus1) == CONST_INT
1978 	  && !xtensa_mem_offset (INTVAL (plus1), mode)
1979 	  && !xtensa_simm8 (INTVAL (plus1))
1980 	  && xtensa_mem_offset (INTVAL (plus1) & 0xff, mode)
1981 	  && xtensa_simm8x256 (INTVAL (plus1) & ~0xff))
1982 	{
1983 	  rtx temp = gen_reg_rtx (Pmode);
1984 	  rtx addmi_offset = GEN_INT (INTVAL (plus1) & ~0xff);
1985 	  emit_insn (gen_rtx_SET (temp, gen_rtx_PLUS (Pmode, plus0,
1986 						      addmi_offset)));
1987 	  return gen_rtx_PLUS (Pmode, temp, GEN_INT (INTVAL (plus1) & 0xff));
1988 	}
1989     }
1990 
1991   return x;
1992 }
1993 
1994 /* Worker function for TARGET_MODE_DEPENDENT_ADDRESS_P.
1995 
1996    Treat constant-pool references as "mode dependent" since they can
1997    only be accessed with SImode loads.  This works around a bug in the
1998    combiner where a constant pool reference is temporarily converted
1999    to an HImode load, which is then assumed to zero-extend based on
2000    our definition of LOAD_EXTEND_OP.  This is wrong because the high
2001    bits of a 16-bit value in the constant pool are now sign-extended
2002    by default.  */
2003 
2004 static bool
2005 xtensa_mode_dependent_address_p (const_rtx addr,
2006 				 addr_space_t as ATTRIBUTE_UNUSED)
2007 {
2008   return constantpool_address_p (addr);
2009 }
2010 
2011 /* Return TRUE if X contains any TLS symbol references.  */
2012 
2013 bool
2014 xtensa_tls_referenced_p (rtx x)
2015 {
2016   if (! TARGET_HAVE_TLS)
2017     return false;
2018 
2019   subrtx_iterator::array_type array;
2020   FOR_EACH_SUBRTX (iter, array, x, ALL)
2021     {
2022       const_rtx x = *iter;
2023       if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2024 	return true;
2025 
2026       /* Ignore TLS references that have already been legitimized.  */
2027       if (GET_CODE (x) == UNSPEC)
2028 	switch (XINT (x, 1))
2029 	  {
2030 	  case UNSPEC_TPOFF:
2031 	  case UNSPEC_DTPOFF:
2032 	  case UNSPEC_TLS_FUNC:
2033 	  case UNSPEC_TLS_ARG:
2034 	  case UNSPEC_TLS_CALL:
2035 	    iter.skip_subrtxes ();
2036 	    break;
2037 	  default:
2038 	    break;
2039 	  }
2040     }
2041   return false;
2042 }
2043 
2044 
2045 /* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
2046 
2047 static bool
2048 xtensa_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2049 {
2050   return xtensa_tls_referenced_p (x);
2051 }
2052 
2053 
2054 /* Return the debugger register number to use for 'regno'.  */
2055 
2056 int
2057 xtensa_dbx_register_number (int regno)
2058 {
2059   int first = -1;
2060 
2061   if (GP_REG_P (regno))
2062     {
2063       regno -= GP_REG_FIRST;
2064       first = 0;
2065     }
2066   else if (BR_REG_P (regno))
2067     {
2068       regno -= BR_REG_FIRST;
2069       first = 16;
2070     }
2071   else if (FP_REG_P (regno))
2072     {
2073       regno -= FP_REG_FIRST;
2074       first = 48;
2075     }
2076   else if (ACC_REG_P (regno))
2077     {
2078       first = 0x200;	/* Start of Xtensa special registers.  */
2079       regno = 16;	/* ACCLO is special register 16.  */
2080     }
2081 
2082   /* When optimizing, we sometimes get asked about pseudo-registers
2083      that don't represent hard registers.  Return 0 for these.  */
2084   if (first == -1)
2085     return 0;
2086 
2087   return first + regno;
2088 }
2089 
2090 
2091 /* Argument support functions.  */
2092 
2093 /* Initialize CUMULATIVE_ARGS for a function.  */
2094 
2095 void
2096 init_cumulative_args (CUMULATIVE_ARGS *cum, int incoming)
2097 {
2098   cum->arg_words = 0;
2099   cum->incoming = incoming;
2100 }
2101 
2102 
2103 /* Advance the argument to the next argument position.  */
2104 
2105 static void
2106 xtensa_function_arg_advance (cumulative_args_t cum, machine_mode mode,
2107 			     const_tree type, bool named ATTRIBUTE_UNUSED)
2108 {
2109   int words, max;
2110   int *arg_words;
2111 
2112   arg_words = &get_cumulative_args (cum)->arg_words;
2113   max = MAX_ARGS_IN_REGISTERS;
2114 
2115   words = (((mode != BLKmode)
2116 	    ? (int) GET_MODE_SIZE (mode)
2117 	    : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2118 
2119   if (*arg_words < max
2120       && (targetm.calls.must_pass_in_stack (mode, type)
2121 	  || *arg_words + words > max))
2122     *arg_words = max;
2123 
2124   *arg_words += words;
2125 }
2126 
2127 
2128 /* Return an RTL expression containing the register for the given mode,
2129    or 0 if the argument is to be passed on the stack.  INCOMING_P is nonzero
2130    if this is an incoming argument to the current function.  */
2131 
2132 static rtx
2133 xtensa_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
2134 		       const_tree type, bool incoming_p)
2135 {
2136   CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2137   int regbase, words, max;
2138   int *arg_words;
2139   int regno;
2140 
2141   arg_words = &cum->arg_words;
2142   regbase = (incoming_p ? GP_ARG_FIRST : GP_OUTGOING_ARG_FIRST);
2143   max = MAX_ARGS_IN_REGISTERS;
2144 
2145   words = (((mode != BLKmode)
2146 	    ? (int) GET_MODE_SIZE (mode)
2147 	    : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2148 
2149   if (type && (TYPE_ALIGN (type) > BITS_PER_WORD))
2150     {
2151       int align = MIN (TYPE_ALIGN (type), STACK_BOUNDARY) / BITS_PER_WORD;
2152       *arg_words = (*arg_words + align - 1) & -align;
2153     }
2154 
2155   if (*arg_words + words > max)
2156     return (rtx)0;
2157 
2158   regno = regbase + *arg_words;
2159 
2160   if (cum->incoming && regno <= A7_REG && regno + words > A7_REG)
2161     cfun->machine->need_a7_copy = TARGET_WINDOWED_ABI;
2162 
2163   return gen_rtx_REG (mode, regno);
2164 }
2165 
2166 /* Implement TARGET_FUNCTION_ARG.  */
2167 
2168 static rtx
2169 xtensa_function_arg (cumulative_args_t cum, machine_mode mode,
2170 		     const_tree type, bool named ATTRIBUTE_UNUSED)
2171 {
2172   return xtensa_function_arg_1 (cum, mode, type, false);
2173 }
2174 
2175 /* Implement TARGET_FUNCTION_INCOMING_ARG.  */
2176 
2177 static rtx
2178 xtensa_function_incoming_arg (cumulative_args_t cum, machine_mode mode,
2179 			      const_tree type, bool named ATTRIBUTE_UNUSED)
2180 {
2181   return xtensa_function_arg_1 (cum, mode, type, true);
2182 }
2183 
2184 static unsigned int
2185 xtensa_function_arg_boundary (machine_mode mode, const_tree type)
2186 {
2187   unsigned int alignment;
2188 
2189   alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
2190   if (alignment < PARM_BOUNDARY)
2191     alignment = PARM_BOUNDARY;
2192   if (alignment > STACK_BOUNDARY)
2193     alignment = STACK_BOUNDARY;
2194   return alignment;
2195 }
2196 
2197 
2198 static bool
2199 xtensa_return_in_msb (const_tree valtype)
2200 {
2201   return (TARGET_BIG_ENDIAN
2202 	  && AGGREGATE_TYPE_P (valtype)
2203 	  && int_size_in_bytes (valtype) >= UNITS_PER_WORD);
2204 }
2205 
2206 
2207 static void
2208 xtensa_option_override (void)
2209 {
2210   int regno;
2211   machine_mode mode;
2212 
2213   /* Use CONST16 in the absence of L32R.
2214      Set it in the TARGET_OPTION_OVERRIDE to avoid dependency on xtensa
2215      configuration in the xtensa-common.c  */
2216 
2217   if (!TARGET_L32R)
2218     target_flags |= MASK_CONST16;
2219 
2220   if (!TARGET_BOOLEANS && TARGET_HARD_FLOAT)
2221     error ("boolean registers required for the floating-point option");
2222 
2223   /* Set up array giving whether a given register can hold a given mode.  */
2224   for (mode = VOIDmode;
2225        mode != MAX_MACHINE_MODE;
2226        mode = (machine_mode) ((int) mode + 1))
2227     {
2228       int size = GET_MODE_SIZE (mode);
2229       enum mode_class mclass = GET_MODE_CLASS (mode);
2230 
2231       for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2232 	{
2233 	  int temp;
2234 
2235 	  if (ACC_REG_P (regno))
2236 	    temp = (TARGET_MAC16
2237 		    && (mclass == MODE_INT) && (size <= UNITS_PER_WORD));
2238 	  else if (GP_REG_P (regno))
2239 	    temp = ((regno & 1) == 0 || (size <= UNITS_PER_WORD));
2240 	  else if (FP_REG_P (regno))
2241 	    temp = (TARGET_HARD_FLOAT && (mode == SFmode));
2242 	  else if (BR_REG_P (regno))
2243 	    temp = (TARGET_BOOLEANS && (mode == CCmode));
2244 	  else
2245 	    temp = FALSE;
2246 
2247 	  xtensa_hard_regno_mode_ok_p[(int) mode][regno] = temp;
2248 	}
2249     }
2250 
2251   init_machine_status = xtensa_init_machine_status;
2252 
2253   /* Check PIC settings.  PIC is only supported when using L32R
2254      instructions, and some targets need to always use PIC.  */
2255   if (flag_pic && TARGET_CONST16)
2256     error ("-f%s is not supported with CONST16 instructions",
2257 	   (flag_pic > 1 ? "PIC" : "pic"));
2258   else if (TARGET_FORCE_NO_PIC)
2259     flag_pic = 0;
2260   else if (XTENSA_ALWAYS_PIC)
2261     {
2262       if (TARGET_CONST16)
2263 	error ("PIC is required but not supported with CONST16 instructions");
2264       flag_pic = 1;
2265     }
2266   /* There's no need for -fPIC (as opposed to -fpic) on Xtensa.  */
2267   if (flag_pic > 1)
2268     flag_pic = 1;
2269   if (flag_pic && !flag_pie)
2270     flag_shlib = 1;
2271 
2272   /* Hot/cold partitioning does not work on this architecture, because of
2273      constant pools (the load instruction cannot necessarily reach that far).
2274      Therefore disable it on this architecture.  */
2275   if (flag_reorder_blocks_and_partition)
2276     {
2277       flag_reorder_blocks_and_partition = 0;
2278       flag_reorder_blocks = 1;
2279     }
2280 }
2281 
2282 /* Implement TARGET_HARD_REGNO_NREGS.  */
2283 
2284 static unsigned int
2285 xtensa_hard_regno_nregs (unsigned int regno, machine_mode mode)
2286 {
2287   if (FP_REG_P (regno))
2288     return CEIL (GET_MODE_SIZE (mode), UNITS_PER_FPREG);
2289   return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
2290 }
2291 
2292 /* Implement TARGET_HARD_REGNO_MODE_OK.  */
2293 
2294 static bool
2295 xtensa_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2296 {
2297   return xtensa_hard_regno_mode_ok_p[mode][regno];
2298 }
2299 
2300 /* Implement TARGET_MODES_TIEABLE_P.  */
2301 
2302 static bool
2303 xtensa_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2304 {
2305   return ((GET_MODE_CLASS (mode1) == MODE_FLOAT
2306 	   || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
2307 	  == (GET_MODE_CLASS (mode2) == MODE_FLOAT
2308 	      || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT));
2309 }
2310 
2311 /* A C compound statement to output to stdio stream STREAM the
2312    assembler syntax for an instruction operand X.  X is an RTL
2313    expression.
2314 
2315    CODE is a value that can be used to specify one of several ways
2316    of printing the operand.  It is used when identical operands
2317    must be printed differently depending on the context.  CODE
2318    comes from the '%' specification that was used to request
2319    printing of the operand.  If the specification was just '%DIGIT'
2320    then CODE is 0; if the specification was '%LTR DIGIT' then CODE
2321    is the ASCII code for LTR.
2322 
2323    If X is a register, this macro should print the register's name.
2324    The names can be found in an array 'reg_names' whose type is
2325    'char *[]'.  'reg_names' is initialized from 'REGISTER_NAMES'.
2326 
2327    When the machine description has a specification '%PUNCT' (a '%'
2328    followed by a punctuation character), this macro is called with
2329    a null pointer for X and the punctuation character for CODE.
2330 
2331    'a', 'c', 'l', and 'n' are reserved.
2332 
2333    The Xtensa specific codes are:
2334 
2335    'd'  CONST_INT, print as signed decimal
2336    'x'  CONST_INT, print as signed hexadecimal
2337    'K'  CONST_INT, print number of bits in mask for EXTUI
2338    'R'  CONST_INT, print (X & 0x1f)
2339    'L'  CONST_INT, print ((32 - X) & 0x1f)
2340    'D'  REG, print second register of double-word register operand
2341    'N'  MEM, print address of next word following a memory operand
2342    'v'  MEM, if memory reference is volatile, output a MEMW before it
2343    't'  any constant, add "@h" suffix for top 16 bits
2344    'b'  any constant, add "@l" suffix for bottom 16 bits
2345 */
2346 
2347 static void
2348 printx (FILE *file, signed int val)
2349 {
2350   /* Print a hexadecimal value in a nice way.  */
2351   if ((val > -0xa) && (val < 0xa))
2352     fprintf (file, "%d", val);
2353   else if (val < 0)
2354     fprintf (file, "-0x%x", -val);
2355   else
2356     fprintf (file, "0x%x", val);
2357 }
2358 
2359 
2360 void
2361 print_operand (FILE *file, rtx x, int letter)
2362 {
2363   if (!x)
2364     error ("PRINT_OPERAND null pointer");
2365 
2366   switch (letter)
2367     {
2368     case 'D':
2369       if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2370 	fprintf (file, "%s", reg_names[xt_true_regnum (x) + 1]);
2371       else
2372 	output_operand_lossage ("invalid %%D value");
2373       break;
2374 
2375     case 'v':
2376       if (GET_CODE (x) == MEM)
2377 	{
2378 	  /* For a volatile memory reference, emit a MEMW before the
2379 	     load or store.  */
2380 	  if (MEM_VOLATILE_P (x) && TARGET_SERIALIZE_VOLATILE)
2381 	    fprintf (file, "memw\n\t");
2382 	}
2383       else
2384 	output_operand_lossage ("invalid %%v value");
2385       break;
2386 
2387     case 'N':
2388       if (GET_CODE (x) == MEM
2389 	  && (GET_MODE (x) == DFmode || GET_MODE (x) == DImode))
2390 	{
2391 	  x = adjust_address (x, GET_MODE (x) == DFmode ? E_SFmode : E_SImode,
2392 			      4);
2393 	  output_address (GET_MODE (x), XEXP (x, 0));
2394 	}
2395       else
2396 	output_operand_lossage ("invalid %%N value");
2397       break;
2398 
2399     case 'K':
2400       if (GET_CODE (x) == CONST_INT)
2401 	{
2402 	  int num_bits = 0;
2403 	  unsigned val = INTVAL (x);
2404 	  while (val & 1)
2405 	    {
2406 	      num_bits += 1;
2407 	      val = val >> 1;
2408 	    }
2409 	  if ((val != 0) || (num_bits == 0) || (num_bits > 16))
2410 	    fatal_insn ("invalid mask", x);
2411 
2412 	  fprintf (file, "%d", num_bits);
2413 	}
2414       else
2415 	output_operand_lossage ("invalid %%K value");
2416       break;
2417 
2418     case 'L':
2419       if (GET_CODE (x) == CONST_INT)
2420 	fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 0x1f);
2421       else
2422 	output_operand_lossage ("invalid %%L value");
2423       break;
2424 
2425     case 'R':
2426       if (GET_CODE (x) == CONST_INT)
2427 	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x1f);
2428       else
2429 	output_operand_lossage ("invalid %%R value");
2430       break;
2431 
2432     case 'x':
2433       if (GET_CODE (x) == CONST_INT)
2434 	printx (file, INTVAL (x));
2435       else
2436 	output_operand_lossage ("invalid %%x value");
2437       break;
2438 
2439     case 'd':
2440       if (GET_CODE (x) == CONST_INT)
2441 	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
2442       else
2443 	output_operand_lossage ("invalid %%d value");
2444       break;
2445 
2446     case 't':
2447     case 'b':
2448       if (GET_CODE (x) == CONST_INT)
2449 	{
2450 	  printx (file, INTVAL (x));
2451 	  fputs (letter == 't' ? "@h" : "@l", file);
2452 	}
2453       else if (GET_CODE (x) == CONST_DOUBLE)
2454 	{
2455 	  if (GET_MODE (x) == SFmode)
2456 	    {
2457 	      long l;
2458 	      REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
2459 	      fprintf (file, "0x%08lx@%c", l, letter == 't' ? 'h' : 'l');
2460 	    }
2461 	  else
2462 	    output_operand_lossage ("invalid %%t/%%b value");
2463 	}
2464       else if (GET_CODE (x) == CONST)
2465 	{
2466 	  /* X must be a symbolic constant on ELF.  Write an expression
2467 	     suitable for 'const16' that sets the high or low 16 bits.  */
2468 	  if (GET_CODE (XEXP (x, 0)) != PLUS
2469 	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
2470 		  && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
2471 	      || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
2472 	    output_operand_lossage ("invalid %%t/%%b value");
2473 	  print_operand (file, XEXP (XEXP (x, 0), 0), 0);
2474 	  fputs (letter == 't' ? "@h" : "@l", file);
2475 	  /* There must be a non-alphanumeric character between 'h' or 'l'
2476 	     and the number.  The '-' is added by print_operand() already.  */
2477 	  if (INTVAL (XEXP (XEXP (x, 0), 1)) >= 0)
2478 	    fputs ("+", file);
2479 	  print_operand (file, XEXP (XEXP (x, 0), 1), 0);
2480 	}
2481       else
2482 	{
2483 	  output_addr_const (file, x);
2484 	  fputs (letter == 't' ? "@h" : "@l", file);
2485 	}
2486       break;
2487 
2488     case 'y':
2489       if (GET_CODE (x) == CONST_DOUBLE &&
2490 	  GET_MODE (x) == SFmode)
2491 	{
2492 	  long l;
2493 	  REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
2494 	  fprintf (file, "0x%08lx", l);
2495 	  break;
2496 	}
2497 
2498       /* fall through */
2499 
2500     default:
2501       if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2502 	fprintf (file, "%s", reg_names[xt_true_regnum (x)]);
2503       else if (GET_CODE (x) == MEM)
2504 	output_address (GET_MODE (x), XEXP (x, 0));
2505       else if (GET_CODE (x) == CONST_INT)
2506 	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
2507       else
2508 	output_addr_const (file, x);
2509     }
2510 }
2511 
2512 
2513 /* A C compound statement to output to stdio stream STREAM the
2514    assembler syntax for an instruction operand that is a memory
2515    reference whose address is ADDR.  ADDR is an RTL expression.  */
2516 
2517 void
2518 print_operand_address (FILE *file, rtx addr)
2519 {
2520   if (!addr)
2521     error ("PRINT_OPERAND_ADDRESS, null pointer");
2522 
2523   switch (GET_CODE (addr))
2524     {
2525     default:
2526       fatal_insn ("invalid address", addr);
2527       break;
2528 
2529     case REG:
2530       fprintf (file, "%s, 0", reg_names [REGNO (addr)]);
2531       break;
2532 
2533     case PLUS:
2534       {
2535 	rtx reg = (rtx)0;
2536 	rtx offset = (rtx)0;
2537 	rtx arg0 = XEXP (addr, 0);
2538 	rtx arg1 = XEXP (addr, 1);
2539 
2540 	if (GET_CODE (arg0) == REG)
2541 	  {
2542 	    reg = arg0;
2543 	    offset = arg1;
2544 	  }
2545 	else if (GET_CODE (arg1) == REG)
2546 	  {
2547 	    reg = arg1;
2548 	    offset = arg0;
2549 	  }
2550 	else
2551 	  fatal_insn ("no register in address", addr);
2552 
2553 	if (CONSTANT_P (offset))
2554 	  {
2555 	    fprintf (file, "%s, ", reg_names [REGNO (reg)]);
2556 	    output_addr_const (file, offset);
2557 	  }
2558 	else
2559 	  fatal_insn ("address offset not a constant", addr);
2560       }
2561       break;
2562 
2563     case LABEL_REF:
2564     case SYMBOL_REF:
2565     case CONST_INT:
2566     case CONST:
2567       output_addr_const (file, addr);
2568       break;
2569     }
2570 }
2571 
2572 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */
2573 
2574 static bool
2575 xtensa_output_addr_const_extra (FILE *fp, rtx x)
2576 {
2577   if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
2578     {
2579       switch (XINT (x, 1))
2580 	{
2581 	case UNSPEC_TPOFF:
2582 	  output_addr_const (fp, XVECEXP (x, 0, 0));
2583 	  fputs ("@TPOFF", fp);
2584 	  return true;
2585 	case UNSPEC_DTPOFF:
2586 	  output_addr_const (fp, XVECEXP (x, 0, 0));
2587 	  fputs ("@DTPOFF", fp);
2588 	  return true;
2589 	case UNSPEC_PLT:
2590 	  if (flag_pic)
2591 	    {
2592 	      output_addr_const (fp, XVECEXP (x, 0, 0));
2593 	      fputs ("@PLT", fp);
2594 	      return true;
2595 	    }
2596 	  break;
2597 	default:
2598 	  break;
2599 	}
2600     }
2601   return false;
2602 }
2603 
2604 static void
2605 xtensa_output_integer_literal_parts (FILE *file, rtx x, int size)
2606 {
2607   if (size > 4 && !(size & (size - 1)))
2608     {
2609       rtx first, second;
2610 
2611       split_double (x, &first, &second);
2612       xtensa_output_integer_literal_parts (file, first, size / 2);
2613       fputs (", ", file);
2614       xtensa_output_integer_literal_parts (file, second, size / 2);
2615     }
2616   else if (size == 4)
2617     {
2618       output_addr_const (file, x);
2619     }
2620   else
2621     {
2622       gcc_unreachable();
2623     }
2624 }
2625 
2626 void
2627 xtensa_output_literal (FILE *file, rtx x, machine_mode mode, int labelno)
2628 {
2629   long value_long[2];
2630 
2631   fprintf (file, "\t.literal .LC%u, ", (unsigned) labelno);
2632 
2633   switch (GET_MODE_CLASS (mode))
2634     {
2635     case MODE_FLOAT:
2636       gcc_assert (GET_CODE (x) == CONST_DOUBLE);
2637 
2638       switch (mode)
2639 	{
2640 	case E_SFmode:
2641 	  REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x),
2642 				       value_long[0]);
2643 	  if (HOST_BITS_PER_LONG > 32)
2644 	    value_long[0] &= 0xffffffff;
2645 	  fprintf (file, "0x%08lx\n", value_long[0]);
2646 	  break;
2647 
2648 	case E_DFmode:
2649 	  REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x),
2650 				       value_long);
2651 	  if (HOST_BITS_PER_LONG > 32)
2652 	    {
2653 	      value_long[0] &= 0xffffffff;
2654 	      value_long[1] &= 0xffffffff;
2655 	    }
2656 	  fprintf (file, "0x%08lx, 0x%08lx\n",
2657 		   value_long[0], value_long[1]);
2658 	  break;
2659 
2660 	default:
2661 	  gcc_unreachable ();
2662 	}
2663 
2664       break;
2665 
2666     case MODE_INT:
2667     case MODE_PARTIAL_INT:
2668       xtensa_output_integer_literal_parts (file, x, GET_MODE_SIZE (mode));
2669       fputs ("\n", file);
2670       break;
2671 
2672     default:
2673       gcc_unreachable ();
2674     }
2675 }
2676 
2677 static bool
2678 xtensa_call_save_reg(int regno)
2679 {
2680   if (TARGET_WINDOWED_ABI)
2681     return false;
2682 
2683   if (regno == A0_REG)
2684     return crtl->profile || !crtl->is_leaf || crtl->calls_eh_return ||
2685       df_regs_ever_live_p (regno);
2686 
2687   if (crtl->calls_eh_return && regno >= 2 && regno < 4)
2688     return true;
2689 
2690   return !fixed_regs[regno] && !call_used_regs[regno] &&
2691     df_regs_ever_live_p (regno);
2692 }
2693 
2694 /* Return the bytes needed to compute the frame pointer from the current
2695    stack pointer.  */
2696 
2697 #define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
2698 #define XTENSA_STACK_ALIGN(LOC) (((LOC) + STACK_BYTES-1) & ~(STACK_BYTES-1))
2699 
2700 long
2701 compute_frame_size (poly_int64 size)
2702 {
2703   int regno;
2704 
2705   if (reload_completed && cfun->machine->frame_laid_out)
2706     return cfun->machine->current_frame_size;
2707 
2708   /* Add space for the incoming static chain value.  */
2709   if (cfun->static_chain_decl != NULL)
2710     size += (1 * UNITS_PER_WORD);
2711 
2712   cfun->machine->callee_save_size = 0;
2713   for (regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2714     {
2715       if (xtensa_call_save_reg(regno))
2716 	cfun->machine->callee_save_size += UNITS_PER_WORD;
2717     }
2718 
2719   cfun->machine->current_frame_size =
2720     XTENSA_STACK_ALIGN (size
2721 			+ cfun->machine->callee_save_size
2722 			+ crtl->outgoing_args_size
2723 			+ (WINDOW_SIZE * UNITS_PER_WORD));
2724   cfun->machine->callee_save_size =
2725     XTENSA_STACK_ALIGN (cfun->machine->callee_save_size);
2726   cfun->machine->frame_laid_out = true;
2727   return cfun->machine->current_frame_size;
2728 }
2729 
2730 
2731 bool
2732 xtensa_frame_pointer_required (void)
2733 {
2734   /* The code to expand builtin_frame_addr and builtin_return_addr
2735      currently uses the hard_frame_pointer instead of frame_pointer.
2736      This seems wrong but maybe it's necessary for other architectures.
2737      This function is derived from the i386 code.  */
2738 
2739   if (cfun->machine->accesses_prev_frame)
2740     return true;
2741 
2742   return false;
2743 }
2744 
2745 HOST_WIDE_INT
2746 xtensa_initial_elimination_offset (int from, int to ATTRIBUTE_UNUSED)
2747 {
2748   long frame_size = compute_frame_size (get_frame_size ());
2749   HOST_WIDE_INT offset;
2750 
2751   switch (from)
2752     {
2753     case FRAME_POINTER_REGNUM:
2754       if (FRAME_GROWS_DOWNWARD)
2755 	offset = frame_size - (WINDOW_SIZE * UNITS_PER_WORD)
2756 	  - cfun->machine->callee_save_size;
2757       else
2758 	offset = 0;
2759       break;
2760     case ARG_POINTER_REGNUM:
2761       offset = frame_size;
2762       break;
2763     default:
2764       gcc_unreachable ();
2765     }
2766 
2767   return offset;
2768 }
2769 
2770 /* minimum frame = reg save area (4 words) plus static chain (1 word)
2771    and the total number of words must be a multiple of 128 bits.  */
2772 #define MIN_FRAME_SIZE (8 * UNITS_PER_WORD)
2773 
2774 void
2775 xtensa_expand_prologue (void)
2776 {
2777   HOST_WIDE_INT total_size;
2778   rtx_insn *insn = NULL;
2779   rtx note_rtx;
2780 
2781 
2782   total_size = compute_frame_size (get_frame_size ());
2783 
2784   if (flag_stack_usage_info)
2785     current_function_static_stack_size = total_size;
2786 
2787   if (TARGET_WINDOWED_ABI)
2788     {
2789       if (total_size < (1 << (12+3)))
2790 	insn = emit_insn (gen_entry (GEN_INT (total_size)));
2791       else
2792 	{
2793 	  /* Use a8 as a temporary since a0-a7 may be live.  */
2794 	  rtx tmp_reg = gen_rtx_REG (Pmode, A8_REG);
2795 	  emit_insn (gen_entry (GEN_INT (MIN_FRAME_SIZE)));
2796 	  emit_move_insn (tmp_reg, GEN_INT (total_size - MIN_FRAME_SIZE));
2797 	  emit_insn (gen_subsi3 (tmp_reg, stack_pointer_rtx, tmp_reg));
2798 	  insn = emit_insn (gen_movsi (stack_pointer_rtx, tmp_reg));
2799 	}
2800     }
2801   else
2802     {
2803       int regno;
2804       HOST_WIDE_INT offset = 0;
2805       int callee_save_size = cfun->machine->callee_save_size;
2806 
2807       /* -128 is a limit of single addi instruction. */
2808       if (total_size > 0 && total_size <= 128)
2809 	{
2810 	  insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
2811 					GEN_INT (-total_size)));
2812 	  RTX_FRAME_RELATED_P (insn) = 1;
2813 	  note_rtx = gen_rtx_SET (stack_pointer_rtx,
2814 				  plus_constant (Pmode, stack_pointer_rtx,
2815 						 -total_size));
2816 	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
2817 	  offset = total_size - UNITS_PER_WORD;
2818 	}
2819       else if (callee_save_size)
2820 	{
2821 	  /* 1020 is maximal s32i offset, if the frame is bigger than that
2822 	   * we move sp to the end of callee-saved save area, save and then
2823 	   * move it to its final location. */
2824 	  if (total_size > 1024)
2825 	    {
2826 	      insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
2827 					    GEN_INT (-callee_save_size)));
2828 	      RTX_FRAME_RELATED_P (insn) = 1;
2829 	      note_rtx = gen_rtx_SET (stack_pointer_rtx,
2830 				      plus_constant (Pmode, stack_pointer_rtx,
2831 						     -callee_save_size));
2832 	      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
2833 	      offset = callee_save_size - UNITS_PER_WORD;
2834 	    }
2835 	  else
2836 	    {
2837 	      rtx tmp_reg = gen_rtx_REG (Pmode, A9_REG);
2838 	      emit_move_insn (tmp_reg, GEN_INT (total_size));
2839 	      insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
2840 					    stack_pointer_rtx, tmp_reg));
2841 	      RTX_FRAME_RELATED_P (insn) = 1;
2842 	      note_rtx = gen_rtx_SET (stack_pointer_rtx,
2843 				      plus_constant (Pmode, stack_pointer_rtx,
2844 						     -total_size));
2845 	      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
2846 	      offset = total_size - UNITS_PER_WORD;
2847 	    }
2848 	}
2849 
2850       for (regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2851 	{
2852 	  if (xtensa_call_save_reg(regno))
2853 	    {
2854 	      rtx x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset));
2855 	      rtx mem = gen_frame_mem (SImode, x);
2856 	      rtx reg = gen_rtx_REG (SImode, regno);
2857 
2858 	      offset -= UNITS_PER_WORD;
2859 	      insn = emit_move_insn (mem, reg);
2860 	      RTX_FRAME_RELATED_P (insn) = 1;
2861 	      add_reg_note (insn, REG_FRAME_RELATED_EXPR,
2862 			    gen_rtx_SET (mem, reg));
2863 	    }
2864 	}
2865       if (total_size > 1024
2866 	  || (!callee_save_size && total_size > 128))
2867 	{
2868 	  rtx tmp_reg = gen_rtx_REG (Pmode, A9_REG);
2869 	  emit_move_insn (tmp_reg, GEN_INT (total_size -
2870 					    callee_save_size));
2871 	  insn = emit_insn (gen_subsi3 (stack_pointer_rtx,
2872 					stack_pointer_rtx, tmp_reg));
2873 	  RTX_FRAME_RELATED_P (insn) = 1;
2874 	  note_rtx = gen_rtx_SET (stack_pointer_rtx,
2875 				  plus_constant (Pmode, stack_pointer_rtx,
2876 						 callee_save_size -
2877 						 total_size));
2878 	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
2879 	}
2880     }
2881 
2882   if (frame_pointer_needed)
2883     {
2884       if (cfun->machine->set_frame_ptr_insn)
2885 	{
2886 	  rtx_insn *first;
2887 
2888 	  push_topmost_sequence ();
2889 	  first = get_insns ();
2890 	  pop_topmost_sequence ();
2891 
2892 	  /* For all instructions prior to set_frame_ptr_insn, replace
2893 	     hard_frame_pointer references with stack_pointer.  */
2894 	  for (insn = first;
2895 	       insn != cfun->machine->set_frame_ptr_insn;
2896 	       insn = NEXT_INSN (insn))
2897 	    {
2898 	      if (INSN_P (insn))
2899 		{
2900 		  PATTERN (insn) = replace_rtx (copy_rtx (PATTERN (insn)),
2901 						hard_frame_pointer_rtx,
2902 						stack_pointer_rtx);
2903 		  df_insn_rescan (insn);
2904 		}
2905 	    }
2906 	}
2907       else
2908         {
2909 	  insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
2910 				       stack_pointer_rtx));
2911 	  if (!TARGET_WINDOWED_ABI)
2912 	    {
2913 	      note_rtx = gen_rtx_SET (hard_frame_pointer_rtx,
2914 				      stack_pointer_rtx);
2915 	      RTX_FRAME_RELATED_P (insn) = 1;
2916 	      add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
2917 	    }
2918 	}
2919     }
2920 
2921   if (TARGET_WINDOWED_ABI)
2922     {
2923       /* Create a note to describe the CFA.  Because this is only used to set
2924 	 DW_AT_frame_base for debug info, don't bother tracking changes through
2925 	 each instruction in the prologue.  It just takes up space.  */
2926       note_rtx = gen_rtx_SET ((frame_pointer_needed
2927 			       ? hard_frame_pointer_rtx
2928 			       : stack_pointer_rtx),
2929 			      plus_constant (Pmode, stack_pointer_rtx,
2930 					     -total_size));
2931       RTX_FRAME_RELATED_P (insn) = 1;
2932       add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
2933     }
2934 }
2935 
2936 void
2937 xtensa_expand_epilogue (void)
2938 {
2939   if (!TARGET_WINDOWED_ABI)
2940     {
2941       int regno;
2942       HOST_WIDE_INT offset;
2943 
2944       if (cfun->machine->current_frame_size > (frame_pointer_needed ? 127 : 1024))
2945 	{
2946 	  rtx tmp_reg = gen_rtx_REG (Pmode, A9_REG);
2947 	  emit_move_insn (tmp_reg, GEN_INT (cfun->machine->current_frame_size -
2948 					    cfun->machine->callee_save_size));
2949 	  emit_insn (gen_addsi3 (stack_pointer_rtx, frame_pointer_needed ?
2950 				 hard_frame_pointer_rtx : stack_pointer_rtx,
2951 				 tmp_reg));
2952 	  offset = cfun->machine->callee_save_size - UNITS_PER_WORD;
2953 	}
2954       else
2955 	{
2956 	  if (frame_pointer_needed)
2957 	    emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2958 	  offset = cfun->machine->current_frame_size - UNITS_PER_WORD;
2959 	}
2960 
2961       /* Prevent reordering of saved a0 update and loading it back from
2962 	 the save area.  */
2963       if (crtl->calls_eh_return)
2964 	emit_insn (gen_blockage ());
2965 
2966       for (regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2967 	{
2968 	  if (xtensa_call_save_reg(regno))
2969 	    {
2970 	      rtx x = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (offset));
2971 
2972 	      offset -= UNITS_PER_WORD;
2973 	      emit_move_insn (gen_rtx_REG (SImode, regno),
2974 			      gen_frame_mem (SImode, x));
2975 	    }
2976 	}
2977 
2978       if (cfun->machine->current_frame_size > 0)
2979 	{
2980 	  if (frame_pointer_needed || /* always reachable with addi */
2981 	      cfun->machine->current_frame_size > 1024 ||
2982 	      cfun->machine->current_frame_size <= 127)
2983 	    {
2984 	      if (cfun->machine->current_frame_size <= 127)
2985 		offset = cfun->machine->current_frame_size;
2986 	      else
2987 		offset = cfun->machine->callee_save_size;
2988 
2989 	      emit_insn (gen_addsi3 (stack_pointer_rtx,
2990 				     stack_pointer_rtx,
2991 				     GEN_INT (offset)));
2992 	    }
2993 	  else
2994 	    {
2995 	      rtx tmp_reg = gen_rtx_REG (Pmode, A9_REG);
2996 	      emit_move_insn (tmp_reg,
2997 			      GEN_INT (cfun->machine->current_frame_size));
2998 	      emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
2999 				     tmp_reg));
3000 	    }
3001 	}
3002 
3003       if (crtl->calls_eh_return)
3004 	emit_insn (gen_add3_insn (stack_pointer_rtx,
3005 				  stack_pointer_rtx,
3006 				  EH_RETURN_STACKADJ_RTX));
3007     }
3008   cfun->machine->epilogue_done = true;
3009   emit_jump_insn (gen_return ());
3010 }
3011 
3012 bool
3013 xtensa_use_return_instruction_p (void)
3014 {
3015   if (!reload_completed)
3016     return false;
3017   if (TARGET_WINDOWED_ABI)
3018     return true;
3019   if (compute_frame_size (get_frame_size ()) == 0)
3020     return true;
3021   return cfun->machine->epilogue_done;
3022 }
3023 
3024 void
3025 xtensa_set_return_address (rtx address, rtx scratch)
3026 {
3027   HOST_WIDE_INT total_size = compute_frame_size (get_frame_size ());
3028   rtx frame = frame_pointer_needed ?
3029     hard_frame_pointer_rtx : stack_pointer_rtx;
3030   rtx a0_addr = plus_constant (Pmode, frame,
3031 			       total_size - UNITS_PER_WORD);
3032   rtx note = gen_rtx_SET (gen_frame_mem (SImode, a0_addr),
3033 			  gen_rtx_REG (SImode, A0_REG));
3034   rtx insn;
3035 
3036   if (total_size > 1024) {
3037     emit_move_insn (scratch, GEN_INT (total_size - UNITS_PER_WORD));
3038     emit_insn (gen_addsi3 (scratch, frame, scratch));
3039     a0_addr = scratch;
3040   }
3041 
3042   insn = emit_move_insn (gen_frame_mem (SImode, a0_addr), address);
3043   RTX_FRAME_RELATED_P (insn) = 1;
3044   add_reg_note (insn, REG_FRAME_RELATED_EXPR, note);
3045 }
3046 
3047 rtx
3048 xtensa_return_addr (int count, rtx frame)
3049 {
3050   rtx result, retaddr, curaddr, label;
3051 
3052   if (!TARGET_WINDOWED_ABI)
3053     {
3054       if (count != 0)
3055 	return const0_rtx;
3056 
3057       return get_hard_reg_initial_val (Pmode, A0_REG);
3058     }
3059 
3060   if (count == -1)
3061     retaddr = gen_rtx_REG (Pmode, A0_REG);
3062   else
3063     {
3064       rtx addr = plus_constant (Pmode, frame, -4 * UNITS_PER_WORD);
3065       addr = memory_address (Pmode, addr);
3066       retaddr = gen_reg_rtx (Pmode);
3067       emit_move_insn (retaddr, gen_rtx_MEM (Pmode, addr));
3068     }
3069 
3070   /* The 2 most-significant bits of the return address on Xtensa hold
3071      the register window size.  To get the real return address, these
3072      bits must be replaced with the high bits from some address in the
3073      code.  */
3074 
3075   /* Get the 2 high bits of a local label in the code.  */
3076   curaddr = gen_reg_rtx (Pmode);
3077   label = gen_label_rtx ();
3078   emit_label (label);
3079   LABEL_PRESERVE_P (label) = 1;
3080   emit_move_insn (curaddr, gen_rtx_LABEL_REF (Pmode, label));
3081   emit_insn (gen_lshrsi3 (curaddr, curaddr, GEN_INT (30)));
3082   emit_insn (gen_ashlsi3 (curaddr, curaddr, GEN_INT (30)));
3083 
3084   /* Clear the 2 high bits of the return address.  */
3085   result = gen_reg_rtx (Pmode);
3086   emit_insn (gen_ashlsi3 (result, retaddr, GEN_INT (2)));
3087   emit_insn (gen_lshrsi3 (result, result, GEN_INT (2)));
3088 
3089   /* Combine them to get the result.  */
3090   emit_insn (gen_iorsi3 (result, result, curaddr));
3091   return result;
3092 }
3093 
3094 /* Disable the use of word-sized or smaller complex modes for structures,
3095    and for function arguments in particular, where they cause problems with
3096    register a7.  The xtensa_copy_incoming_a7 function assumes that there is
3097    a single reference to an argument in a7, but with small complex modes the
3098    real and imaginary components may be extracted separately, leading to two
3099    uses of the register, only one of which would be replaced.  */
3100 
3101 static bool
3102 xtensa_member_type_forces_blk (const_tree, machine_mode mode)
3103 {
3104   return mode == CQImode || mode == CHImode;
3105 }
3106 
3107 /* Create the va_list data type.
3108 
3109    This structure is set up by __builtin_saveregs.  The __va_reg field
3110    points to a stack-allocated region holding the contents of the
3111    incoming argument registers.  The __va_ndx field is an index
3112    initialized to the position of the first unnamed (variable)
3113    argument.  This same index is also used to address the arguments
3114    passed in memory.  Thus, the __va_stk field is initialized to point
3115    to the position of the first argument in memory offset to account
3116    for the arguments passed in registers and to account for the size
3117    of the argument registers not being 16-byte aligned.  E.G., there
3118    are 6 argument registers of 4 bytes each, but we want the __va_ndx
3119    for the first stack argument to have the maximal alignment of 16
3120    bytes, so we offset the __va_stk address by 32 bytes so that
3121    __va_stk[32] references the first argument on the stack.  */
3122 
3123 static tree
3124 xtensa_build_builtin_va_list (void)
3125 {
3126   tree f_stk, f_reg, f_ndx, record, type_decl;
3127 
3128   record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3129   type_decl = build_decl (BUILTINS_LOCATION,
3130 			  TYPE_DECL, get_identifier ("__va_list_tag"), record);
3131 
3132   f_stk = build_decl (BUILTINS_LOCATION,
3133 		      FIELD_DECL, get_identifier ("__va_stk"),
3134 		      ptr_type_node);
3135   f_reg = build_decl (BUILTINS_LOCATION,
3136 		      FIELD_DECL, get_identifier ("__va_reg"),
3137 		      ptr_type_node);
3138   f_ndx = build_decl (BUILTINS_LOCATION,
3139 		      FIELD_DECL, get_identifier ("__va_ndx"),
3140 		      integer_type_node);
3141 
3142   DECL_FIELD_CONTEXT (f_stk) = record;
3143   DECL_FIELD_CONTEXT (f_reg) = record;
3144   DECL_FIELD_CONTEXT (f_ndx) = record;
3145 
3146   TYPE_STUB_DECL (record) = type_decl;
3147   TYPE_NAME (record) = type_decl;
3148   TYPE_FIELDS (record) = f_stk;
3149   DECL_CHAIN (f_stk) = f_reg;
3150   DECL_CHAIN (f_reg) = f_ndx;
3151 
3152   layout_type (record);
3153   return record;
3154 }
3155 
3156 
3157 /* Save the incoming argument registers on the stack.  Returns the
3158    address of the saved registers.  */
3159 
3160 static rtx
3161 xtensa_builtin_saveregs (void)
3162 {
3163   rtx gp_regs;
3164   int arg_words = crtl->args.info.arg_words;
3165   int gp_left = MAX_ARGS_IN_REGISTERS - arg_words;
3166 
3167   if (gp_left <= 0)
3168     return const0_rtx;
3169 
3170   /* Allocate the general-purpose register space.  */
3171   gp_regs = assign_stack_local
3172     (BLKmode, MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1);
3173   set_mem_alias_set (gp_regs, get_varargs_alias_set ());
3174 
3175   /* Now store the incoming registers.  */
3176   cfun->machine->need_a7_copy = TARGET_WINDOWED_ABI;
3177   cfun->machine->vararg_a7 = true;
3178   move_block_from_reg (GP_ARG_FIRST + arg_words,
3179 		       adjust_address (gp_regs, BLKmode,
3180 				       arg_words * UNITS_PER_WORD),
3181 		       gp_left);
3182   if (cfun->machine->vararg_a7_copy != 0)
3183     emit_insn_before (cfun->machine->vararg_a7_copy, get_insns ());
3184 
3185   return XEXP (gp_regs, 0);
3186 }
3187 
3188 
3189 /* Implement `va_start' for varargs and stdarg.  We look at the
3190    current function to fill in an initial va_list.  */
3191 
3192 static void
3193 xtensa_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
3194 {
3195   tree f_stk, stk;
3196   tree f_reg, reg;
3197   tree f_ndx, ndx;
3198   tree t, u;
3199   int arg_words;
3200 
3201   arg_words = crtl->args.info.arg_words;
3202 
3203   f_stk = TYPE_FIELDS (va_list_type_node);
3204   f_reg = DECL_CHAIN (f_stk);
3205   f_ndx = DECL_CHAIN (f_reg);
3206 
3207   stk = build3 (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk, NULL_TREE);
3208   reg = build3 (COMPONENT_REF, TREE_TYPE (f_reg), unshare_expr (valist),
3209 		f_reg, NULL_TREE);
3210   ndx = build3 (COMPONENT_REF, TREE_TYPE (f_ndx), unshare_expr (valist),
3211 		f_ndx, NULL_TREE);
3212 
3213   /* Call __builtin_saveregs; save the result in __va_reg */
3214   u = make_tree (sizetype, expand_builtin_saveregs ());
3215   u = fold_convert (ptr_type_node, u);
3216   t = build2 (MODIFY_EXPR, ptr_type_node, reg, u);
3217   TREE_SIDE_EFFECTS (t) = 1;
3218   expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3219 
3220   /* Set the __va_stk member to ($arg_ptr - 32).  */
3221   u = make_tree (ptr_type_node, virtual_incoming_args_rtx);
3222   u = fold_build_pointer_plus_hwi (u, -32);
3223   t = build2 (MODIFY_EXPR, ptr_type_node, stk, u);
3224   TREE_SIDE_EFFECTS (t) = 1;
3225   expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3226 
3227   /* Set the __va_ndx member.  If the first variable argument is on
3228      the stack, adjust __va_ndx by 2 words to account for the extra
3229      alignment offset for __va_stk.  */
3230   if (arg_words >= MAX_ARGS_IN_REGISTERS)
3231     arg_words += 2;
3232   t = build2 (MODIFY_EXPR, integer_type_node, ndx,
3233 	      build_int_cst (integer_type_node, arg_words * UNITS_PER_WORD));
3234   TREE_SIDE_EFFECTS (t) = 1;
3235   expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3236 }
3237 
3238 
3239 /* Implement `va_arg'.  */
3240 
3241 static tree
3242 xtensa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
3243 			     gimple_seq *post_p ATTRIBUTE_UNUSED)
3244 {
3245   tree f_stk, stk;
3246   tree f_reg, reg;
3247   tree f_ndx, ndx;
3248   tree type_size, array, orig_ndx, addr, size, va_size, t;
3249   tree lab_false, lab_over, lab_false2;
3250   bool indirect;
3251 
3252   indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
3253   if (indirect)
3254     type = build_pointer_type (type);
3255 
3256   /* Handle complex values as separate real and imaginary parts.  */
3257   if (TREE_CODE (type) == COMPLEX_TYPE)
3258     {
3259       tree real_part, imag_part;
3260 
3261       real_part = xtensa_gimplify_va_arg_expr (valist, TREE_TYPE (type),
3262 					       pre_p, NULL);
3263       real_part = get_initialized_tmp_var (real_part, pre_p, NULL);
3264 
3265       imag_part = xtensa_gimplify_va_arg_expr (unshare_expr (valist),
3266 					       TREE_TYPE (type),
3267 					       pre_p, NULL);
3268       imag_part = get_initialized_tmp_var (imag_part, pre_p, NULL);
3269 
3270       return build2 (COMPLEX_EXPR, type, real_part, imag_part);
3271     }
3272 
3273   f_stk = TYPE_FIELDS (va_list_type_node);
3274   f_reg = DECL_CHAIN (f_stk);
3275   f_ndx = DECL_CHAIN (f_reg);
3276 
3277   stk = build3 (COMPONENT_REF, TREE_TYPE (f_stk), valist,
3278 		f_stk, NULL_TREE);
3279   reg = build3 (COMPONENT_REF, TREE_TYPE (f_reg), unshare_expr (valist),
3280 		f_reg, NULL_TREE);
3281   ndx = build3 (COMPONENT_REF, TREE_TYPE (f_ndx), unshare_expr (valist),
3282 		f_ndx, NULL_TREE);
3283 
3284   type_size = size_in_bytes (type);
3285   va_size = round_up (type_size, UNITS_PER_WORD);
3286   gimplify_expr (&va_size, pre_p, NULL, is_gimple_val, fb_rvalue);
3287 
3288 
3289   /* First align __va_ndx if necessary for this arg:
3290 
3291      orig_ndx = (AP).__va_ndx;
3292      if (__alignof__ (TYPE) > 4 )
3293        orig_ndx = ((orig_ndx + __alignof__ (TYPE) - 1)
3294 			& -__alignof__ (TYPE)); */
3295 
3296   orig_ndx = get_initialized_tmp_var (ndx, pre_p, NULL);
3297 
3298   if (TYPE_ALIGN (type) > BITS_PER_WORD)
3299     {
3300       int align = MIN (TYPE_ALIGN (type), STACK_BOUNDARY) / BITS_PER_UNIT;
3301 
3302       t = build2 (PLUS_EXPR, integer_type_node, unshare_expr (orig_ndx),
3303 		  build_int_cst (integer_type_node, align - 1));
3304       t = build2 (BIT_AND_EXPR, integer_type_node, t,
3305 		  build_int_cst (integer_type_node, -align));
3306       gimplify_assign (unshare_expr (orig_ndx), t, pre_p);
3307     }
3308 
3309 
3310   /* Increment __va_ndx to point past the argument:
3311 
3312      (AP).__va_ndx = orig_ndx + __va_size (TYPE); */
3313 
3314   t = fold_convert (integer_type_node, va_size);
3315   t = build2 (PLUS_EXPR, integer_type_node, orig_ndx, t);
3316   gimplify_assign (unshare_expr (ndx), t, pre_p);
3317 
3318 
3319   /* Check if the argument is in registers:
3320 
3321      if ((AP).__va_ndx <= __MAX_ARGS_IN_REGISTERS * 4
3322          && !must_pass_in_stack (type))
3323         __array = (AP).__va_reg; */
3324 
3325   array = create_tmp_var (ptr_type_node);
3326 
3327   lab_over = NULL;
3328   if (!targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
3329     {
3330       lab_false = create_artificial_label (UNKNOWN_LOCATION);
3331       lab_over = create_artificial_label (UNKNOWN_LOCATION);
3332 
3333       t = build2 (GT_EXPR, boolean_type_node, unshare_expr (ndx),
3334 		  build_int_cst (integer_type_node,
3335 				 MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD));
3336       t = build3 (COND_EXPR, void_type_node, t,
3337 		  build1 (GOTO_EXPR, void_type_node, lab_false),
3338 		  NULL_TREE);
3339       gimplify_and_add (t, pre_p);
3340 
3341       gimplify_assign (unshare_expr (array), reg, pre_p);
3342 
3343       t = build1 (GOTO_EXPR, void_type_node, lab_over);
3344       gimplify_and_add (t, pre_p);
3345 
3346       t = build1 (LABEL_EXPR, void_type_node, lab_false);
3347       gimplify_and_add (t, pre_p);
3348     }
3349 
3350 
3351   /* ...otherwise, the argument is on the stack (never split between
3352      registers and the stack -- change __va_ndx if necessary):
3353 
3354      else
3355        {
3356 	 if (orig_ndx <= __MAX_ARGS_IN_REGISTERS * 4)
3357 	     (AP).__va_ndx = 32 + __va_size (TYPE);
3358 	 __array = (AP).__va_stk;
3359        } */
3360 
3361   lab_false2 = create_artificial_label (UNKNOWN_LOCATION);
3362 
3363   t = build2 (GT_EXPR, boolean_type_node, unshare_expr (orig_ndx),
3364 	      build_int_cst (integer_type_node,
3365 			     MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD));
3366   t = build3 (COND_EXPR, void_type_node, t,
3367 	      build1 (GOTO_EXPR, void_type_node, lab_false2),
3368 	      NULL_TREE);
3369   gimplify_and_add (t, pre_p);
3370 
3371   t = size_binop (PLUS_EXPR, unshare_expr (va_size), size_int (32));
3372   t = fold_convert (integer_type_node, t);
3373   gimplify_assign (unshare_expr (ndx), t, pre_p);
3374 
3375   t = build1 (LABEL_EXPR, void_type_node, lab_false2);
3376   gimplify_and_add (t, pre_p);
3377 
3378   gimplify_assign (array, stk, pre_p);
3379 
3380   if (lab_over)
3381     {
3382       t = build1 (LABEL_EXPR, void_type_node, lab_over);
3383       gimplify_and_add (t, pre_p);
3384     }
3385 
3386 
3387   /* Given the base array pointer (__array) and index to the subsequent
3388      argument (__va_ndx), find the address:
3389 
3390      __array + (AP).__va_ndx - (BYTES_BIG_ENDIAN && sizeof (TYPE) < 4
3391 				? sizeof (TYPE)
3392 				: __va_size (TYPE))
3393 
3394      The results are endian-dependent because values smaller than one word
3395      are aligned differently.  */
3396 
3397 
3398   if (BYTES_BIG_ENDIAN && TREE_CODE (type_size) == INTEGER_CST)
3399     {
3400       t = fold_build2 (GE_EXPR, boolean_type_node, unshare_expr (type_size),
3401 		       size_int (PARM_BOUNDARY / BITS_PER_UNIT));
3402       t = fold_build3 (COND_EXPR, sizetype, t, unshare_expr (va_size),
3403 		       unshare_expr (type_size));
3404       size = t;
3405     }
3406   else
3407     size = unshare_expr (va_size);
3408 
3409   t = fold_convert (sizetype, unshare_expr (ndx));
3410   t = build2 (MINUS_EXPR, sizetype, t, size);
3411   addr = fold_build_pointer_plus (unshare_expr (array), t);
3412 
3413   addr = fold_convert (build_pointer_type (type), addr);
3414   if (indirect)
3415     addr = build_va_arg_indirect_ref (addr);
3416   return build_va_arg_indirect_ref (addr);
3417 }
3418 
3419 
3420 /* Builtins.  */
3421 
3422 enum xtensa_builtin
3423 {
3424   XTENSA_BUILTIN_UMULSIDI3,
3425   XTENSA_BUILTIN_max
3426 };
3427 
3428 
3429 static void
3430 xtensa_init_builtins (void)
3431 {
3432   tree ftype, decl;
3433 
3434   ftype = build_function_type_list (unsigned_intDI_type_node,
3435 				    unsigned_intSI_type_node,
3436 				    unsigned_intSI_type_node, NULL_TREE);
3437 
3438   decl = add_builtin_function ("__builtin_umulsidi3", ftype,
3439 			       XTENSA_BUILTIN_UMULSIDI3, BUILT_IN_MD,
3440 			       "__umulsidi3", NULL_TREE);
3441   TREE_NOTHROW (decl) = 1;
3442   TREE_READONLY (decl) = 1;
3443 }
3444 
3445 
3446 static tree
3447 xtensa_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
3448 		     bool ignore ATTRIBUTE_UNUSED)
3449 {
3450   unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
3451   tree arg0, arg1;
3452 
3453   switch (fcode)
3454     {
3455     case XTENSA_BUILTIN_UMULSIDI3:
3456       arg0 = args[0];
3457       arg1 = args[1];
3458       if ((TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
3459 	  || TARGET_MUL32_HIGH)
3460 	return fold_build2 (MULT_EXPR, unsigned_intDI_type_node,
3461 			    fold_convert (unsigned_intDI_type_node, arg0),
3462 			    fold_convert (unsigned_intDI_type_node, arg1));
3463       break;
3464 
3465     default:
3466       internal_error ("bad builtin code");
3467       break;
3468     }
3469 
3470   return NULL;
3471 }
3472 
3473 
3474 static rtx
3475 xtensa_expand_builtin (tree exp, rtx target,
3476 		       rtx subtarget ATTRIBUTE_UNUSED,
3477 		       machine_mode mode ATTRIBUTE_UNUSED,
3478 		       int ignore)
3479 {
3480   tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
3481   unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
3482 
3483   switch (fcode)
3484     {
3485     case XTENSA_BUILTIN_UMULSIDI3:
3486       /* The umulsidi3 builtin is just a mechanism to avoid calling the real
3487 	 __umulsidi3 function when the Xtensa configuration can directly
3488 	 implement it.  If not, just call the function.  */
3489       return expand_call (exp, target, ignore);
3490 
3491     default:
3492       internal_error ("bad builtin code");
3493     }
3494   return NULL_RTX;
3495 }
3496 
3497 /* Worker function for TARGET_PREFERRED_RELOAD_CLASS.  */
3498 
3499 static reg_class_t
3500 xtensa_preferred_reload_class (rtx x, reg_class_t rclass)
3501 {
3502   if (CONSTANT_P (x) && CONST_DOUBLE_P (x))
3503     return NO_REGS;
3504 
3505   /* Don't use the stack pointer or hard frame pointer for reloads!
3506      The hard frame pointer would normally be OK except that it may
3507      briefly hold an incoming argument in the prologue, and reload
3508      won't know that it is live because the hard frame pointer is
3509      treated specially.  */
3510 
3511   if (rclass == AR_REGS || rclass == GR_REGS)
3512     return RL_REGS;
3513 
3514   return rclass;
3515 }
3516 
3517 /* Worker function for TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */
3518 
3519 static reg_class_t
3520 xtensa_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
3521 				      reg_class_t rclass)
3522 {
3523   /* Don't use the stack pointer or hard frame pointer for reloads!
3524      The hard frame pointer would normally be OK except that it may
3525      briefly hold an incoming argument in the prologue, and reload
3526      won't know that it is live because the hard frame pointer is
3527      treated specially.  */
3528 
3529   if (rclass == AR_REGS || rclass == GR_REGS)
3530     return RL_REGS;
3531 
3532   return rclass;
3533 }
3534 
3535 /* Worker function for TARGET_SECONDARY_RELOAD.  */
3536 
3537 static reg_class_t
3538 xtensa_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
3539 			 machine_mode mode, secondary_reload_info *sri)
3540 {
3541   int regno;
3542 
3543   if (in_p && constantpool_mem_p (x))
3544     {
3545       if (rclass == FP_REGS)
3546 	return RL_REGS;
3547 
3548       if (mode == QImode)
3549 	sri->icode = CODE_FOR_reloadqi_literal;
3550       else if (mode == HImode)
3551 	sri->icode = CODE_FOR_reloadhi_literal;
3552     }
3553 
3554   regno = xt_true_regnum (x);
3555   if (ACC_REG_P (regno))
3556     return ((rclass == GR_REGS || rclass == RL_REGS) ? NO_REGS : RL_REGS);
3557   if (rclass == ACC_REG)
3558     return (GP_REG_P (regno) ? NO_REGS : RL_REGS);
3559 
3560   return NO_REGS;
3561 }
3562 
3563 
3564 void
3565 order_regs_for_local_alloc (void)
3566 {
3567   if (!leaf_function_p ())
3568     {
3569       static const int reg_nonleaf_alloc_order[FIRST_PSEUDO_REGISTER] =
3570 	REG_ALLOC_ORDER;
3571       static const int reg_nonleaf_alloc_order_call0[FIRST_PSEUDO_REGISTER] =
3572 	{
3573 	  11, 10,  9,  8,  7,  6,  5,  4,  3,  2, 12, 13, 14, 15,
3574 	  18,
3575 	  19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
3576 	  0,  1, 16, 17,
3577 	  35,
3578 	};
3579 
3580       memcpy (reg_alloc_order, TARGET_WINDOWED_ABI ?
3581 	      reg_nonleaf_alloc_order : reg_nonleaf_alloc_order_call0,
3582 	      FIRST_PSEUDO_REGISTER * sizeof (int));
3583     }
3584   else
3585     {
3586       int i, num_arg_regs;
3587       int nxt = 0;
3588 
3589       /* Use the AR registers in increasing order (skipping a0 and a1)
3590 	 but save the incoming argument registers for a last resort.  */
3591       num_arg_regs = crtl->args.info.arg_words;
3592       if (num_arg_regs > MAX_ARGS_IN_REGISTERS)
3593 	num_arg_regs = MAX_ARGS_IN_REGISTERS;
3594       for (i = GP_ARG_FIRST; i < 16 - num_arg_regs; i++)
3595 	reg_alloc_order[nxt++] = i + num_arg_regs;
3596       for (i = 0; i < num_arg_regs; i++)
3597 	reg_alloc_order[nxt++] = GP_ARG_FIRST + i;
3598 
3599       /* List the coprocessor registers in order.  */
3600       for (i = 0; i < BR_REG_NUM; i++)
3601 	reg_alloc_order[nxt++] = BR_REG_FIRST + i;
3602 
3603       /* List the FP registers in order for now.  */
3604       for (i = 0; i < 16; i++)
3605 	reg_alloc_order[nxt++] = FP_REG_FIRST + i;
3606 
3607       /* GCC requires that we list *all* the registers....  */
3608       reg_alloc_order[nxt++] = 0;	/* a0 = return address */
3609       reg_alloc_order[nxt++] = 1;	/* a1 = stack pointer */
3610       reg_alloc_order[nxt++] = 16;	/* pseudo frame pointer */
3611       reg_alloc_order[nxt++] = 17;	/* pseudo arg pointer */
3612 
3613       reg_alloc_order[nxt++] = ACC_REG_FIRST;	/* MAC16 accumulator */
3614     }
3615 }
3616 
3617 
3618 /* Some Xtensa targets support multiple bss sections.  If the section
3619    name ends with ".bss", add SECTION_BSS to the flags.  */
3620 
3621 static unsigned int
3622 xtensa_multibss_section_type_flags (tree decl, const char *name, int reloc)
3623 {
3624   unsigned int flags = default_section_type_flags (decl, name, reloc);
3625   const char *suffix;
3626 
3627   suffix = strrchr (name, '.');
3628   if (suffix && strcmp (suffix, ".bss") == 0)
3629     {
3630       if (!decl || (TREE_CODE (decl) == VAR_DECL
3631 		    && DECL_INITIAL (decl) == NULL_TREE))
3632 	flags |= SECTION_BSS;  /* @nobits */
3633       else
3634 	warning (0, "only uninitialized variables can be placed in a "
3635 		 ".bss section");
3636     }
3637 
3638   return flags;
3639 }
3640 
3641 
3642 /* The literal pool stays with the function.  */
3643 
3644 static section *
3645 xtensa_select_rtx_section (machine_mode mode ATTRIBUTE_UNUSED,
3646 			   rtx x ATTRIBUTE_UNUSED,
3647 			   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
3648 {
3649   return function_section (current_function_decl);
3650 }
3651 
3652 /* Worker function for TARGET_REGISTER_MOVE_COST.  */
3653 
3654 static int
3655 xtensa_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3656 			   reg_class_t from, reg_class_t to)
3657 {
3658   if (from == to && from != BR_REGS && to != BR_REGS)
3659     return 2;
3660   else if (reg_class_subset_p (from, AR_REGS)
3661 	   && reg_class_subset_p (to, AR_REGS))
3662     return 2;
3663   else if (reg_class_subset_p (from, AR_REGS) && to == ACC_REG)
3664     return 3;
3665   else if (from == ACC_REG && reg_class_subset_p (to, AR_REGS))
3666     return 3;
3667   else
3668     return 10;
3669 }
3670 
3671 /* Worker function for TARGET_MEMORY_MOVE_COST.  */
3672 
3673 static int
3674 xtensa_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
3675 			 reg_class_t rclass ATTRIBUTE_UNUSED,
3676 			 bool in ATTRIBUTE_UNUSED)
3677 {
3678   return 4;
3679 }
3680 
3681 /* Compute a (partial) cost for rtx X.  Return true if the complete
3682    cost has been computed, and false if subexpressions should be
3683    scanned.  In either case, *TOTAL contains the cost result.  */
3684 
3685 static bool
3686 xtensa_rtx_costs (rtx x, machine_mode mode, int outer_code,
3687 		  int opno ATTRIBUTE_UNUSED,
3688 		  int *total, bool speed ATTRIBUTE_UNUSED)
3689 {
3690   int code = GET_CODE (x);
3691 
3692   switch (code)
3693     {
3694     case CONST_INT:
3695       switch (outer_code)
3696 	{
3697 	case SET:
3698 	  if (xtensa_simm12b (INTVAL (x)))
3699 	    {
3700 	      *total = 4;
3701 	      return true;
3702 	    }
3703 	  break;
3704 	case PLUS:
3705 	  if (xtensa_simm8 (INTVAL (x))
3706 	      || xtensa_simm8x256 (INTVAL (x)))
3707 	    {
3708 	      *total = 0;
3709 	      return true;
3710 	    }
3711 	  break;
3712 	case AND:
3713 	  if (xtensa_mask_immediate (INTVAL (x)))
3714 	    {
3715 	      *total = 0;
3716 	      return true;
3717 	    }
3718 	  break;
3719 	case COMPARE:
3720 	  if ((INTVAL (x) == 0) || xtensa_b4const (INTVAL (x)))
3721 	    {
3722 	      *total = 0;
3723 	      return true;
3724 	    }
3725 	  break;
3726 	case ASHIFT:
3727 	case ASHIFTRT:
3728 	case LSHIFTRT:
3729 	case ROTATE:
3730 	case ROTATERT:
3731 	  /* No way to tell if X is the 2nd operand so be conservative.  */
3732 	default: break;
3733 	}
3734       if (xtensa_simm12b (INTVAL (x)))
3735 	*total = 5;
3736       else if (TARGET_CONST16)
3737 	*total = COSTS_N_INSNS (2);
3738       else
3739 	*total = 6;
3740       return true;
3741 
3742     case CONST:
3743     case LABEL_REF:
3744     case SYMBOL_REF:
3745       if (TARGET_CONST16)
3746 	*total = COSTS_N_INSNS (2);
3747       else
3748 	*total = 5;
3749       return true;
3750 
3751     case CONST_DOUBLE:
3752       if (TARGET_CONST16)
3753 	*total = COSTS_N_INSNS (4);
3754       else
3755 	*total = 7;
3756       return true;
3757 
3758     case MEM:
3759       {
3760 	int num_words =
3761 	  (GET_MODE_SIZE (mode) > UNITS_PER_WORD) ?  2 : 1;
3762 
3763 	if (memory_address_p (mode, XEXP ((x), 0)))
3764 	  *total = COSTS_N_INSNS (num_words);
3765 	else
3766 	  *total = COSTS_N_INSNS (2*num_words);
3767 	return true;
3768       }
3769 
3770     case FFS:
3771     case CTZ:
3772       *total = COSTS_N_INSNS (TARGET_NSA ? 5 : 50);
3773       return true;
3774 
3775     case CLZ:
3776       *total = COSTS_N_INSNS (TARGET_NSA ? 1 : 50);
3777       return true;
3778 
3779     case NOT:
3780       *total = COSTS_N_INSNS (mode == DImode ? 3 : 2);
3781       return true;
3782 
3783     case AND:
3784     case IOR:
3785     case XOR:
3786       if (mode == DImode)
3787 	*total = COSTS_N_INSNS (2);
3788       else
3789 	*total = COSTS_N_INSNS (1);
3790       return true;
3791 
3792     case ASHIFT:
3793     case ASHIFTRT:
3794     case LSHIFTRT:
3795       if (mode == DImode)
3796 	*total = COSTS_N_INSNS (50);
3797       else
3798 	*total = COSTS_N_INSNS (1);
3799       return true;
3800 
3801     case ABS:
3802       {
3803 	if (mode == SFmode)
3804 	  *total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50);
3805 	else if (mode == DFmode)
3806 	  *total = COSTS_N_INSNS (50);
3807 	else
3808 	  *total = COSTS_N_INSNS (4);
3809 	return true;
3810       }
3811 
3812     case PLUS:
3813     case MINUS:
3814       {
3815 	if (mode == SFmode)
3816 	  *total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50);
3817 	else if (mode == DFmode || mode == DImode)
3818 	  *total = COSTS_N_INSNS (50);
3819 	else
3820 	  *total = COSTS_N_INSNS (1);
3821 	return true;
3822       }
3823 
3824     case NEG:
3825       *total = COSTS_N_INSNS (mode == DImode ? 4 : 2);
3826       return true;
3827 
3828     case MULT:
3829       {
3830 	if (mode == SFmode)
3831 	  *total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 4 : 50);
3832 	else if (mode == DFmode)
3833 	  *total = COSTS_N_INSNS (50);
3834 	else if (mode == DImode)
3835 	  *total = COSTS_N_INSNS (TARGET_MUL32_HIGH ? 10 : 50);
3836 	else if (TARGET_MUL32)
3837 	  *total = COSTS_N_INSNS (4);
3838 	else if (TARGET_MAC16)
3839 	  *total = COSTS_N_INSNS (16);
3840 	else if (TARGET_MUL16)
3841 	  *total = COSTS_N_INSNS (12);
3842 	else
3843 	  *total = COSTS_N_INSNS (50);
3844 	return true;
3845       }
3846 
3847     case DIV:
3848     case MOD:
3849       {
3850 	if (mode == SFmode)
3851 	  {
3852 	    *total = COSTS_N_INSNS (TARGET_HARD_FLOAT_DIV ? 8 : 50);
3853 	    return true;
3854 	  }
3855 	else if (mode == DFmode)
3856 	  {
3857 	    *total = COSTS_N_INSNS (50);
3858 	    return true;
3859 	  }
3860       }
3861       /* Fall through.  */
3862 
3863     case UDIV:
3864     case UMOD:
3865       {
3866 	if (mode == DImode)
3867 	  *total = COSTS_N_INSNS (50);
3868 	else if (TARGET_DIV32)
3869 	  *total = COSTS_N_INSNS (32);
3870 	else
3871 	  *total = COSTS_N_INSNS (50);
3872 	return true;
3873       }
3874 
3875     case SQRT:
3876       if (mode == SFmode)
3877 	*total = COSTS_N_INSNS (TARGET_HARD_FLOAT_SQRT ? 8 : 50);
3878       else
3879 	*total = COSTS_N_INSNS (50);
3880       return true;
3881 
3882     case SMIN:
3883     case UMIN:
3884     case SMAX:
3885     case UMAX:
3886       *total = COSTS_N_INSNS (TARGET_MINMAX ? 1 : 50);
3887       return true;
3888 
3889     case SIGN_EXTRACT:
3890     case SIGN_EXTEND:
3891       *total = COSTS_N_INSNS (TARGET_SEXT ? 1 : 2);
3892       return true;
3893 
3894     case ZERO_EXTRACT:
3895     case ZERO_EXTEND:
3896       *total = COSTS_N_INSNS (1);
3897       return true;
3898 
3899     default:
3900       return false;
3901     }
3902 }
3903 
3904 /* Worker function for TARGET_RETURN_IN_MEMORY.  */
3905 
3906 static bool
3907 xtensa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3908 {
3909   return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)
3910 	  > 4 * UNITS_PER_WORD);
3911 }
3912 
3913 /* Worker function for TARGET_FUNCTION_VALUE.  */
3914 
3915 rtx
3916 xtensa_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
3917                       bool outgoing)
3918 {
3919   return gen_rtx_REG ((INTEGRAL_TYPE_P (valtype)
3920                       && TYPE_PRECISION (valtype) < BITS_PER_WORD)
3921                      ? SImode : TYPE_MODE (valtype),
3922                      outgoing ? GP_OUTGOING_RETURN : GP_RETURN);
3923 }
3924 
3925 /* Worker function for TARGET_LIBCALL_VALUE.  */
3926 
3927 static rtx
3928 xtensa_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
3929 {
3930   return gen_rtx_REG ((GET_MODE_CLASS (mode) == MODE_INT
3931 		       && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
3932 		      ? SImode : mode, GP_RETURN);
3933 }
3934 
3935 /* Worker function TARGET_FUNCTION_VALUE_REGNO_P.  */
3936 
3937 static bool
3938 xtensa_function_value_regno_p (const unsigned int regno)
3939 {
3940   return (regno == GP_RETURN);
3941 }
3942 
3943 /* The static chain is passed in memory.  Provide rtx giving 'mem'
3944    expressions that denote where they are stored.  */
3945 
3946 static rtx
3947 xtensa_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p)
3948 {
3949   if (TARGET_WINDOWED_ABI)
3950     {
3951       rtx base = incoming_p ? arg_pointer_rtx : stack_pointer_rtx;
3952       return gen_frame_mem (Pmode, plus_constant (Pmode, base,
3953 						  -5 * UNITS_PER_WORD));
3954     }
3955   else
3956     return gen_rtx_REG (Pmode, A8_REG);
3957 }
3958 
3959 
3960 /* TRAMPOLINE_TEMPLATE: For Xtensa, the trampoline must perform an ENTRY
3961    instruction with a minimal stack frame in order to get some free
3962    registers.  Once the actual call target is known, the proper stack frame
3963    size is extracted from the ENTRY instruction at the target and the
3964    current frame is adjusted to match.  The trampoline then transfers
3965    control to the instruction following the ENTRY at the target.  Note:
3966    this assumes that the target begins with an ENTRY instruction.  */
3967 
3968 static void
3969 xtensa_asm_trampoline_template (FILE *stream)
3970 {
3971   bool use_call0 = (TARGET_CONST16 || TARGET_ABSOLUTE_LITERALS);
3972 
3973   fprintf (stream, "\t.begin no-transform\n");
3974 
3975   if (TARGET_WINDOWED_ABI)
3976     {
3977       fprintf (stream, "\tentry\tsp, %d\n", MIN_FRAME_SIZE);
3978 
3979       if (use_call0)
3980 	{
3981 	  /* Save the return address.  */
3982 	  fprintf (stream, "\tmov\ta10, a0\n");
3983 
3984 	  /* Use a CALL0 instruction to skip past the constants and in the
3985 	     process get the PC into A0.  This allows PC-relative access to
3986 	     the constants without relying on L32R.  */
3987 	  fprintf (stream, "\tcall0\t.Lskipconsts\n");
3988 	}
3989       else
3990 	fprintf (stream, "\tj\t.Lskipconsts\n");
3991 
3992       fprintf (stream, "\t.align\t4\n");
3993       fprintf (stream, ".Lchainval:%s0\n", integer_asm_op (4, TRUE));
3994       fprintf (stream, ".Lfnaddr:%s0\n", integer_asm_op (4, TRUE));
3995       fprintf (stream, ".Lskipconsts:\n");
3996 
3997       /* Load the static chain and function address from the trampoline.  */
3998       if (use_call0)
3999 	{
4000 	  fprintf (stream, "\taddi\ta0, a0, 3\n");
4001 	  fprintf (stream, "\tl32i\ta9, a0, 0\n");
4002 	  fprintf (stream, "\tl32i\ta8, a0, 4\n");
4003 	}
4004       else
4005 	{
4006 	  fprintf (stream, "\tl32r\ta9, .Lchainval\n");
4007 	  fprintf (stream, "\tl32r\ta8, .Lfnaddr\n");
4008 	}
4009 
4010       /* Store the static chain.  */
4011       fprintf (stream, "\ts32i\ta9, sp, %d\n", MIN_FRAME_SIZE - 20);
4012 
4013       /* Set the proper stack pointer value.  */
4014       fprintf (stream, "\tl32i\ta9, a8, 0\n");
4015       fprintf (stream, "\textui\ta9, a9, %d, 12\n",
4016 	       TARGET_BIG_ENDIAN ? 8 : 12);
4017       fprintf (stream, "\tslli\ta9, a9, 3\n");
4018       fprintf (stream, "\taddi\ta9, a9, %d\n", -MIN_FRAME_SIZE);
4019       fprintf (stream, "\tsub\ta9, sp, a9\n");
4020       fprintf (stream, "\tmovsp\tsp, a9\n");
4021 
4022       if (use_call0)
4023 	/* Restore the return address.  */
4024 	fprintf (stream, "\tmov\ta0, a10\n");
4025 
4026       /* Jump to the instruction following the ENTRY.  */
4027       fprintf (stream, "\taddi\ta8, a8, 3\n");
4028       fprintf (stream, "\tjx\ta8\n");
4029 
4030       /* Pad size to a multiple of TRAMPOLINE_ALIGNMENT.  */
4031       if (use_call0)
4032 	fprintf (stream, "\t.byte\t0\n");
4033       else
4034 	fprintf (stream, "\tnop\n");
4035     }
4036   else
4037     {
4038       if (use_call0)
4039 	{
4040 	  /* Save the return address.  */
4041 	  fprintf (stream, "\tmov\ta10, a0\n");
4042 
4043 	  /* Use a CALL0 instruction to skip past the constants and in the
4044 	     process get the PC into A0.  This allows PC-relative access to
4045 	     the constants without relying on L32R.  */
4046 	  fprintf (stream, "\tcall0\t.Lskipconsts\n");
4047 	}
4048       else
4049 	fprintf (stream, "\tj\t.Lskipconsts\n");
4050 
4051       fprintf (stream, "\t.align\t4\n");
4052       fprintf (stream, ".Lchainval:%s0\n", integer_asm_op (4, TRUE));
4053       fprintf (stream, ".Lfnaddr:%s0\n", integer_asm_op (4, TRUE));
4054       fprintf (stream, ".Lskipconsts:\n");
4055 
4056       /* Load the static chain and function address from the trampoline.  */
4057       if (use_call0)
4058 	{
4059 	  fprintf (stream, "\taddi\ta0, a0, 3\n");
4060 	  fprintf (stream, "\tl32i\ta8, a0, 0\n");
4061 	  fprintf (stream, "\tl32i\ta9, a0, 4\n");
4062 	  fprintf (stream, "\tmov\ta0, a10\n");
4063 	}
4064       else
4065 	{
4066 	  fprintf (stream, "\tl32r\ta8, .Lchainval\n");
4067 	  fprintf (stream, "\tl32r\ta9, .Lfnaddr\n");
4068 	}
4069       fprintf (stream, "\tjx\ta9\n");
4070 
4071       /* Pad size to a multiple of TRAMPOLINE_ALIGNMENT.  */
4072       if (use_call0)
4073 	fprintf (stream, "\t.byte\t0\n");
4074       else
4075 	fprintf (stream, "\tnop\n");
4076     }
4077   fprintf (stream, "\t.end no-transform\n");
4078 }
4079 
4080 static void
4081 xtensa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain)
4082 {
4083   rtx func = XEXP (DECL_RTL (fndecl), 0);
4084   bool use_call0 = (TARGET_CONST16 || TARGET_ABSOLUTE_LITERALS);
4085   int chain_off;
4086   int func_off;
4087 
4088   if (TARGET_WINDOWED_ABI)
4089     {
4090       chain_off = use_call0 ? 12 : 8;
4091       func_off = use_call0 ? 16 : 12;
4092     }
4093   else
4094     {
4095       chain_off = use_call0 ? 8 : 4;
4096       func_off = use_call0 ? 12 : 8;
4097     }
4098 
4099   emit_block_move (m_tramp, assemble_trampoline_template (),
4100 		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
4101 
4102   emit_move_insn (adjust_address (m_tramp, SImode, chain_off), chain);
4103   emit_move_insn (adjust_address (m_tramp, SImode, func_off), func);
4104   emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_sync_caches"),
4105 		     LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode);
4106 }
4107 
4108 /* Implement TARGET_LEGITIMATE_CONSTANT_P.  */
4109 
4110 static bool
4111 xtensa_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4112 {
4113   return !xtensa_tls_referenced_p (x);
4114 }
4115 
4116 /* Implement TARGET_CAN_USE_DOLOOP_P.  */
4117 
4118 static bool
4119 xtensa_can_use_doloop_p (const widest_int &, const widest_int &,
4120                          unsigned int loop_depth, bool entered_at_top)
4121 {
4122   /* Considering limitations in the hardware, only use doloop
4123      for innermost loops which must be entered from the top.  */
4124   if (loop_depth > 1 || !entered_at_top)
4125     return false;
4126 
4127   return true;
4128 }
4129 
4130 /* NULL if INSN insn is valid within a low-overhead loop.
4131    Otherwise return why doloop cannot be applied.  */
4132 
4133 static const char *
4134 xtensa_invalid_within_doloop (const rtx_insn *insn)
4135 {
4136   if (CALL_P (insn))
4137     return "Function call in the loop.";
4138 
4139   if (JUMP_P (insn) && INSN_CODE (insn) == CODE_FOR_return)
4140     return "Return from a call instruction in the loop.";
4141 
4142   return NULL;
4143 }
4144 
4145 /* Optimize LOOP.  */
4146 
4147 static bool
4148 hwloop_optimize (hwloop_info loop)
4149 {
4150   int i;
4151   edge entry_edge;
4152   basic_block entry_bb;
4153   rtx iter_reg;
4154   rtx_insn *insn, *seq, *entry_after;
4155 
4156   if (loop->depth > 1)
4157     {
4158       if (dump_file)
4159         fprintf (dump_file, ";; loop %d is not innermost\n",
4160                  loop->loop_no);
4161       return false;
4162     }
4163 
4164   if (!loop->incoming_dest)
4165     {
4166       if (dump_file)
4167         fprintf (dump_file, ";; loop %d has more than one entry\n",
4168                  loop->loop_no);
4169       return false;
4170     }
4171 
4172   if (loop->incoming_dest != loop->head)
4173     {
4174       if (dump_file)
4175         fprintf (dump_file, ";; loop %d is not entered from head\n",
4176                  loop->loop_no);
4177       return false;
4178     }
4179 
4180   if (loop->has_call || loop->has_asm)
4181     {
4182       if (dump_file)
4183         fprintf (dump_file, ";; loop %d has invalid insn\n",
4184                  loop->loop_no);
4185       return false;
4186     }
4187 
4188   /* Scan all the blocks to make sure they don't use iter_reg.  */
4189   if (loop->iter_reg_used || loop->iter_reg_used_outside)
4190     {
4191       if (dump_file)
4192         fprintf (dump_file, ";; loop %d uses iterator\n",
4193                  loop->loop_no);
4194       return false;
4195     }
4196 
4197   /* Check if start_label appears before doloop_end.  */
4198   insn = loop->start_label;
4199   while (insn && insn != loop->loop_end)
4200     insn = NEXT_INSN (insn);
4201 
4202   if (!insn)
4203     {
4204       if (dump_file)
4205         fprintf (dump_file, ";; loop %d start_label not before loop_end\n",
4206                  loop->loop_no);
4207       return false;
4208     }
4209 
4210   /* Get the loop iteration register.  */
4211   iter_reg = loop->iter_reg;
4212 
4213   gcc_assert (REG_P (iter_reg));
4214 
4215   entry_edge = NULL;
4216 
4217   FOR_EACH_VEC_SAFE_ELT (loop->incoming, i, entry_edge)
4218     if (entry_edge->flags & EDGE_FALLTHRU)
4219       break;
4220 
4221   if (entry_edge == NULL)
4222     return false;
4223 
4224   /* Place the zero_cost_loop_start instruction before the loop.  */
4225   entry_bb = entry_edge->src;
4226 
4227   start_sequence ();
4228 
4229   insn = emit_insn (gen_zero_cost_loop_start (loop->iter_reg,
4230                                               loop->start_label,
4231                                               loop->iter_reg));
4232 
4233   seq = get_insns ();
4234 
4235   if (!single_succ_p (entry_bb) || vec_safe_length (loop->incoming) > 1)
4236     {
4237       basic_block new_bb;
4238       edge e;
4239       edge_iterator ei;
4240 
4241       emit_insn_before (seq, BB_HEAD (loop->head));
4242       seq = emit_label_before (gen_label_rtx (), seq);
4243       new_bb = create_basic_block (seq, insn, entry_bb);
4244       FOR_EACH_EDGE (e, ei, loop->incoming)
4245         {
4246           if (!(e->flags & EDGE_FALLTHRU))
4247             redirect_edge_and_branch_force (e, new_bb);
4248           else
4249             redirect_edge_succ (e, new_bb);
4250         }
4251 
4252       make_edge (new_bb, loop->head, 0);
4253     }
4254   else
4255     {
4256       entry_after = BB_END (entry_bb);
4257       while (DEBUG_INSN_P (entry_after)
4258              || (NOTE_P (entry_after)
4259 		 && NOTE_KIND (entry_after) != NOTE_INSN_BASIC_BLOCK))
4260         entry_after = PREV_INSN (entry_after);
4261 
4262       emit_insn_after (seq, entry_after);
4263     }
4264 
4265   end_sequence ();
4266 
4267   return true;
4268 }
4269 
4270 /* A callback for the hw-doloop pass.  Called when a loop we have discovered
4271    turns out not to be optimizable; we have to split the loop_end pattern into
4272    a subtract and a test.  */
4273 
4274 static void
4275 hwloop_fail (hwloop_info loop)
4276 {
4277   rtx test;
4278   rtx_insn *insn = loop->loop_end;
4279 
4280   emit_insn_before (gen_addsi3 (loop->iter_reg,
4281                                 loop->iter_reg,
4282                                 constm1_rtx),
4283                     loop->loop_end);
4284 
4285   test = gen_rtx_NE (VOIDmode, loop->iter_reg, const0_rtx);
4286   insn = emit_jump_insn_before (gen_cbranchsi4 (test,
4287                                                 loop->iter_reg, const0_rtx,
4288                                                 loop->start_label),
4289                                 loop->loop_end);
4290 
4291   JUMP_LABEL (insn) = loop->start_label;
4292   LABEL_NUSES (loop->start_label)++;
4293   delete_insn (loop->loop_end);
4294 }
4295 
4296 /* A callback for the hw-doloop pass.  This function examines INSN; if
4297    it is a doloop_end pattern we recognize, return the reg rtx for the
4298    loop counter.  Otherwise, return NULL_RTX.  */
4299 
4300 static rtx
4301 hwloop_pattern_reg (rtx_insn *insn)
4302 {
4303   rtx reg;
4304 
4305   if (!JUMP_P (insn) || recog_memoized (insn) != CODE_FOR_loop_end)
4306     return NULL_RTX;
4307 
4308   reg = SET_DEST (XVECEXP (PATTERN (insn), 0, 1));
4309   if (!REG_P (reg))
4310     return NULL_RTX;
4311 
4312   return reg;
4313 }
4314 
4315 
4316 static struct hw_doloop_hooks xtensa_doloop_hooks =
4317 {
4318   hwloop_pattern_reg,
4319   hwloop_optimize,
4320   hwloop_fail
4321 };
4322 
4323 /* Run from machine_dependent_reorg, this pass looks for doloop_end insns
4324    and tries to rewrite the RTL of these loops so that proper Xtensa
4325    hardware loops are generated.  */
4326 
4327 static void
4328 xtensa_reorg_loops (void)
4329 {
4330   if (TARGET_LOOPS)
4331     reorg_loops (false, &xtensa_doloop_hooks);
4332 }
4333 
4334 /* Implement the TARGET_MACHINE_DEPENDENT_REORG pass.  */
4335 
4336 static void
4337 xtensa_reorg (void)
4338 {
4339   /* We are freeing block_for_insn in the toplev to keep compatibility
4340      with old MDEP_REORGS that are not CFG based.  Recompute it now.  */
4341   compute_bb_for_insn ();
4342 
4343   df_analyze ();
4344 
4345   /* Doloop optimization.  */
4346   xtensa_reorg_loops ();
4347 }
4348 
4349 /* Update register usage after having seen the compiler flags.  */
4350 
4351 static void
4352 xtensa_conditional_register_usage (void)
4353 {
4354   unsigned i, c_mask;
4355 
4356   c_mask = TARGET_WINDOWED_ABI ? (1 << 1) : (1 << 2);
4357 
4358   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4359     {
4360       /* Set/reset conditionally defined registers from
4361 	 CALL_USED_REGISTERS initializer.  */
4362       if (call_used_regs[i] > 1)
4363 	call_used_regs[i] = !!(call_used_regs[i] & c_mask);
4364     }
4365 
4366   /* Remove hard FP register from the preferred reload registers set.  */
4367   CLEAR_HARD_REG_BIT (reg_class_contents[(int)RL_REGS],
4368 		      HARD_FRAME_POINTER_REGNUM);
4369 }
4370 
4371 /* Map hard register number to register class */
4372 
4373 enum reg_class xtensa_regno_to_class (int regno)
4374 {
4375   static const enum reg_class regno_to_class[FIRST_PSEUDO_REGISTER] =
4376     {
4377       RL_REGS,	SP_REG,		RL_REGS,	RL_REGS,
4378       RL_REGS,	RL_REGS,	RL_REGS,	RL_REGS,
4379       RL_REGS,	RL_REGS,	RL_REGS,	RL_REGS,
4380       RL_REGS,	RL_REGS,	RL_REGS,	RL_REGS,
4381       AR_REGS,	AR_REGS,	BR_REGS,
4382       FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
4383       FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
4384       FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
4385       FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
4386       ACC_REG,
4387     };
4388 
4389   if (regno == HARD_FRAME_POINTER_REGNUM)
4390     return GR_REGS;
4391   else
4392     return regno_to_class[regno];
4393 }
4394 
4395 /* Implement TARGET_CONSTANT_ALIGNMENT.  Align string constants and
4396    constructors to at least a word boundary.  The typical use of this
4397    macro is to increase alignment for string constants to be word
4398    aligned so that 'strcpy' calls that copy constants can be done
4399    inline.  */
4400 
4401 static HOST_WIDE_INT
4402 xtensa_constant_alignment (const_tree exp, HOST_WIDE_INT align)
4403 {
4404   if ((TREE_CODE (exp) == STRING_CST || TREE_CODE (exp) == CONSTRUCTOR)
4405       && !optimize_size)
4406     return MAX (align, BITS_PER_WORD);
4407   return align;
4408 }
4409 
4410 /* Implement TARGET_STARTING_FRAME_OFFSET.  */
4411 
4412 static HOST_WIDE_INT
4413 xtensa_starting_frame_offset (void)
4414 {
4415   if (FRAME_GROWS_DOWNWARD)
4416     return 0;
4417   return crtl->outgoing_args_size;
4418 }
4419 
4420 /* Implement TARGET_ASAN_SHADOW_OFFSET.  */
4421 
4422 static unsigned HOST_WIDE_INT
4423 xtensa_asan_shadow_offset (void)
4424 {
4425   return HOST_WIDE_INT_UC (0x10000000);
4426 }
4427 
4428 #include "gt-xtensa.h"
4429