xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/config/m32c/m32c.c (revision c9496f6b604074a9451a67df576a5b423068e71e)
1 /* Target Code for R8C/M16C/M32C
2    Copyright (C) 2005-2015 Free Software Foundation, Inc.
3    Contributed by Red Hat.
4 
5    This file is part of GCC.
6 
7    GCC is free software; you can redistribute it and/or modify it
8    under the terms of the GNU General Public License as published
9    by the Free Software Foundation; either version 3, or (at your
10    option) any later version.
11 
12    GCC is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with GCC; see the file COPYING3.  If not see
19    <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "regs.h"
27 #include "hard-reg-set.h"
28 #include "insn-config.h"
29 #include "conditions.h"
30 #include "insn-flags.h"
31 #include "output.h"
32 #include "insn-attr.h"
33 #include "flags.h"
34 #include "recog.h"
35 #include "reload.h"
36 #include "diagnostic-core.h"
37 #include "obstack.h"
38 #include "hash-set.h"
39 #include "machmode.h"
40 #include "vec.h"
41 #include "double-int.h"
42 #include "input.h"
43 #include "alias.h"
44 #include "symtab.h"
45 #include "wide-int.h"
46 #include "inchash.h"
47 #include "tree.h"
48 #include "fold-const.h"
49 #include "stor-layout.h"
50 #include "varasm.h"
51 #include "calls.h"
52 #include "hashtab.h"
53 #include "function.h"
54 #include "statistics.h"
55 #include "real.h"
56 #include "fixed-value.h"
57 #include "expmed.h"
58 #include "dojump.h"
59 #include "explow.h"
60 #include "emit-rtl.h"
61 #include "stmt.h"
62 #include "expr.h"
63 #include "insn-codes.h"
64 #include "optabs.h"
65 #include "except.h"
66 #include "ggc.h"
67 #include "target.h"
68 #include "target-def.h"
69 #include "tm_p.h"
70 #include "langhooks.h"
71 #include "hash-table.h"
72 #include "predict.h"
73 #include "dominance.h"
74 #include "cfg.h"
75 #include "cfgrtl.h"
76 #include "cfganal.h"
77 #include "lcm.h"
78 #include "cfgbuild.h"
79 #include "cfgcleanup.h"
80 #include "basic-block.h"
81 #include "tree-ssa-alias.h"
82 #include "internal-fn.h"
83 #include "gimple-fold.h"
84 #include "tree-eh.h"
85 #include "gimple-expr.h"
86 #include "is-a.h"
87 #include "gimple.h"
88 #include "df.h"
89 #include "tm-constrs.h"
90 #include "builtins.h"
91 
92 /* Prototypes */
93 
/* Used by m32c_pushm_popm.  Selects what that function does with the
   computed register set.  */
typedef enum
{
  PP_pushm,	/* generate a pushm insn */
  PP_popm,	/* generate a popm insn */
  PP_justcount	/* emit nothing; only compute the push size
		   (NOTE(review): inferred from the name --
		   m32c_pushm_popm's body is outside this chunk)  */
} Push_Pop_Type;
101 
102 static bool m32c_function_needs_enter (void);
103 static tree interrupt_handler (tree *, tree, tree, int, bool *);
104 static tree function_vector_handler (tree *, tree, tree, int, bool *);
105 static int interrupt_p (tree node);
106 static int bank_switch_p (tree node);
107 static int fast_interrupt_p (tree node);
108 static int interrupt_p (tree node);
109 static bool m32c_asm_integer (rtx, unsigned int, int);
110 static int m32c_comp_type_attributes (const_tree, const_tree);
111 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
112 static struct machine_function *m32c_init_machine_status (void);
113 static void m32c_insert_attributes (tree, tree *);
114 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
115 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
116 static rtx m32c_function_arg (cumulative_args_t, machine_mode,
117 			      const_tree, bool);
118 static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
119 				    const_tree, bool);
120 static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
121 				       const_tree, bool);
122 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
123 static int m32c_pushm_popm (Push_Pop_Type);
124 static bool m32c_strict_argument_naming (cumulative_args_t);
125 static rtx m32c_struct_value_rtx (tree, int);
126 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
127 static int need_to_save (int);
128 static rtx m32c_function_value (const_tree, const_tree, bool);
129 static rtx m32c_libcall_value (machine_mode, const_rtx);
130 
131 /* Returns true if an address is specified, else false.  */
132 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
133 
/* Mark a SYMBOL_REF as referring to a function reachable through the
   function-vector (special page) mechanism.  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)

#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* Per-class hard-register bit masks.  Only one int's worth of bits is
   kept per class, which suffices for this port's register set.  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
/* pattern[] holds the encoded string; patternp is the write cursor.  */
static char pattern[30], *patternp;
/* patternr[i] is the RTX node that produced pattern[i].  */
static GTY(()) rtx patternr[30];
/* Compare the most recently encoded pattern against a literal.  */
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
165 
166 static int
167 far_addr_space_p (rtx x)
168 {
169   if (GET_CODE (x) != MEM)
170     return 0;
171 #if DEBUG0
172   fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
173   fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
174 #endif
175   return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
176 }
177 
178 /* We do most RTX matching by converting the RTX into a string, and
179    using string compares.  This vastly simplifies the logic in many of
180    the functions in this file.
181 
182    On exit, pattern[] has the encoded string (use RTX_IS("...") to
183    compare it) and patternr[] has pointers to the nodes in the RTX
184    corresponding to each character in the encoded string.  The latter
185    is mostly used by print_operand().
186 
187    Unrecognized patterns have '?' in them; this shows up when the
188    assembler complains about syntax errors.
189 */
190 
/* Recursively encode X into pattern[]/patternr[], one character per
   RTX node (see the overview comment below this function's caller).  */
static void
encode_pattern_1 (rtx x)
{
  int i;

  /* Guard against overflowing pattern[]: clobber the last encoded
     character with '?' so the truncated result can never match a
     valid pattern.  */
  if (patternp == pattern + sizeof (pattern) - 2)
    {
      patternp[-1] = '?';
      return;
    }

  /* Record which node produced the character at this position so
     RTX_IS users can fetch operands through patternr[].  */
  patternr[patternp - pattern] = x;

  switch (GET_CODE (x))
    {
    case REG:
      *patternp++ = 'r';
      break;
    case SUBREG:
      /* 'S' marks a size-changing subreg; a PSImode subreg of a REG
	 gets a (second) 'S' as well.  */
      if (GET_MODE_SIZE (GET_MODE (x)) !=
	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
	*patternp++ = 'S';
      if (GET_MODE (x) == PSImode
	  && GET_CODE (XEXP (x, 0)) == REG)
	*patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case MEM:
      *patternp++ = 'm';
      /* FALLTHRU - after the 'm', a MEM encodes its address operand
	 exactly the way CONST encodes its operand.  */
    case CONST:
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SIGN_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'S';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case ZERO_EXTEND:
      *patternp++ = '^';
      *patternp++ = 'Z';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case PLUS:
      *patternp++ = '+';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case PRE_DEC:
      *patternp++ = '>';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case POST_INC:
      *patternp++ = '<';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case LO_SUM:
      *patternp++ = 'L';
      encode_pattern_1 (XEXP (x, 0));
      encode_pattern_1 (XEXP (x, 1));
      break;
    case HIGH:
      *patternp++ = 'H';
      encode_pattern_1 (XEXP (x, 0));
      break;
    case SYMBOL_REF:
      *patternp++ = 's';
      break;
    case LABEL_REF:
      *patternp++ = 'l';
      break;
    case CODE_LABEL:
      *patternp++ = 'c';
      break;
    case CONST_INT:
    case CONST_DOUBLE:
      *patternp++ = 'i';
      break;
    case UNSPEC:
      /* 'u' followed by the unspec number as a single digit; the
	 operands follow.  */
      *patternp++ = 'u';
      *patternp++ = '0' + XCINT (x, 1, UNSPEC);
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case USE:
      *patternp++ = 'U';
      break;
    case PARALLEL:
      *patternp++ = '|';
      for (i = 0; i < XVECLEN (x, 0); i++)
	encode_pattern_1 (XVECEXP (x, 0, i));
      break;
    case EXPR_LIST:
      *patternp++ = 'E';
      encode_pattern_1 (XEXP (x, 0));
      if (XEXP (x, 1))
	encode_pattern_1 (XEXP (x, 1));
      break;
    default:
      /* Unknown codes encode as '?', which matches no constraint
	 pattern; the assembler's syntax error is the symptom.  */
      *patternp++ = '?';
#if DEBUG0
      fprintf (stderr, "can't encode pattern %s\n",
	       GET_RTX_NAME (GET_CODE (x)));
      debug_rtx (x);
#endif
      break;
    }
}
298 
299 static void
300 encode_pattern (rtx x)
301 {
302   patternp = pattern;
303   encode_pattern_1 (x);
304   *patternp = 0;
305 }
306 
307 /* Since register names indicate the mode they're used in, we need a
308    way to determine which name to refer to the register with.  Called
309    by print_operand().  */
310 
311 static const char *
312 reg_name_with_mode (int regno, machine_mode mode)
313 {
314   int mlen = GET_MODE_SIZE (mode);
315   if (regno == R0_REGNO && mlen == 1)
316     return "r0l";
317   if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
318     return "r2r0";
319   if (regno == R0_REGNO && mlen == 6)
320     return "r2r1r0";
321   if (regno == R0_REGNO && mlen == 8)
322     return "r3r1r2r0";
323   if (regno == R1_REGNO && mlen == 1)
324     return "r1l";
325   if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
326     return "r3r1";
327   if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
328     return "a1a0";
329   return reg_names[regno];
330 }
331 
332 /* How many bytes a register uses on stack when it's pushed.  We need
333    to know this because the push opcode needs to explicitly indicate
334    the size of the register, even though the name of the register
335    already tells it that.  Used by m32c_output_reg_{push,pop}, which
336    is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */
337 
338 static int
339 reg_push_size (int regno)
340 {
341   switch (regno)
342     {
343     case R0_REGNO:
344     case R1_REGNO:
345       return 2;
346     case R2_REGNO:
347     case R3_REGNO:
348     case FLG_REGNO:
349       return 2;
350     case A0_REGNO:
351     case A1_REGNO:
352     case SB_REGNO:
353     case FB_REGNO:
354     case SP_REGNO:
355       if (TARGET_A16)
356 	return 2;
357       else
358 	return 3;
359     default:
360       gcc_unreachable ();
361     }
362 }
363 
364 /* Given two register classes, find the largest intersection between
365    them.  If there is no intersection, return RETURNED_IF_EMPTY
366    instead.  */
367 static reg_class_t
368 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
369 	      reg_class_t returned_if_empty)
370 {
371   HARD_REG_SET cc;
372   int i;
373   reg_class_t best = NO_REGS;
374   unsigned int best_size = 0;
375 
376   if (original_class == limiting_class)
377     return original_class;
378 
379   cc = reg_class_contents[original_class];
380   AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
381 
382   for (i = 0; i < LIM_REG_CLASSES; i++)
383     {
384       if (hard_reg_set_subset_p (reg_class_contents[i], cc))
385 	if (best_size < reg_class_size[i])
386 	  {
387 	    best = (reg_class_t) i;
388 	    best_size = reg_class_size[i];
389 	  }
390 
391     }
392   if (best == NO_REGS)
393     return returned_if_empty;
394   return best;
395 }
396 
397 /* Used by m32c_register_move_cost to determine if a move is
398    impossibly expensive.  */
399 static bool
400 class_can_hold_mode (reg_class_t rclass, machine_mode mode)
401 {
402   /* Cache the results:  0=untested  1=no  2=yes */
403   static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
404 
405   if (results[(int) rclass][mode] == 0)
406     {
407       int r;
408       results[rclass][mode] = 1;
409       for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
410 	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
411 	    && HARD_REGNO_MODE_OK (r, mode))
412 	  {
413 	    results[rclass][mode] = 2;
414 	    break;
415 	  }
416     }
417 
418 #if DEBUG0
419   fprintf (stderr, "class %s can hold %s? %s\n",
420 	   class_names[(int) rclass], mode_name[mode],
421 	   (results[rclass][mode] == 2) ? "yes" : "no");
422 #endif
423   return results[(int) rclass][mode] == 2;
424 }
425 
426 /* Run-time Target Specification.  */
427 
428 /* Memregs are memory locations that gcc treats like general
429    registers, as there are a limited number of true registers and the
430    m32c families can use memory in most places that registers can be
431    used.
432 
433    However, since memory accesses are more expensive than registers,
434    we allow the user to limit the number of memregs available, in
435    order to try to persuade gcc to try harder to use real registers.
436 
437    Memregs are provided by lib1funcs.S.
438 */
439 
/* NOTE(review): the writers of this flag are elsewhere in the file;
   judging by the name and the comment block above, code that
   temporarily commandeers memregs clears it to stop the memreg count
   from being changed underneath it -- confirm against the rest of
   the file.  */
int ok_to_change_target_memregs = TRUE;
441 
442 /* Implements TARGET_OPTION_OVERRIDE.  */
443 
444 #undef TARGET_OPTION_OVERRIDE
445 #define TARGET_OPTION_OVERRIDE m32c_option_override
446 
static void
m32c_option_override (void)
{
  /* We limit memregs to 0..16, and provide a default.  Note that an
     out-of-range value is diagnosed but not clamped; the consumer
     (m32c_conditional_register_usage) re-checks the range.  */
  if (global_options_set.x_target_memregs)
    {
      if (target_memregs < 0 || target_memregs > 16)
	error ("invalid target memregs value '%d'", target_memregs);
    }
  else
    target_memregs = 16;

  /* Induction-variable optimization is disabled for the 24-bit-address
     chips.  NOTE(review): the reason is not recorded here.  */
  if (TARGET_A24)
    flag_ivopts = 0;

  /* This target defaults to strict volatile bitfields.  */
  if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
    flag_strict_volatile_bitfields = 1;

  /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
     This is always worse than an absolute call.  */
  if (TARGET_A16)
    flag_no_function_cse = 1;

  /* This wants to put insns between compares and their jumps.  */
  /* FIXME: The right solution is to properly trace the flags register
     values, but that is too much work for stage 4.  */
  flag_combine_stack_adjustments = 0;
}
476 
#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change

/* Re-assert the A16 no-function-cse override whenever options change
   after the initial override (keep in sync with m32c_option_override
   above).  */
static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
486 
487 /* Defining data structures for per-function information */
488 
/* The usual; we set up our machine_function data.  Returns a
   zero-initialized, garbage-collected machine_function.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
495 
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function whenever a new function's machine state is needed.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
503 
504 /* Storage Layout */
505 
506 /* Register Basics */
507 
508 /* Basic Characteristics of Registers */
509 
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Rows are indexed by hard register number (note the hardware
   numbering interleaves r0,r2,r1,r3).  Each column gives how many
   registers are needed to hold a value of that mode class starting at
   this register (qi=QImode, hi=HImode, pi=PSImode, si=SImode,
   di=DImode); 0 means the mode cannot live there at all (see
   m32c_hard_regno_nregs_1 below).  */
static struct
{
  char qi_regs;
  char hi_regs;
  char pi_regs;
  char si_regs;
  char di_regs;
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
542 
543 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
544    of available memregs, and select which registers need to be preserved
545    across calls based on the chip family.  */
546 
547 #undef TARGET_CONDITIONAL_REGISTER_USAGE
548 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
549 void
550 m32c_conditional_register_usage (void)
551 {
552   int i;
553 
554   if (0 <= target_memregs && target_memregs <= 16)
555     {
556       /* The command line option is bytes, but our "registers" are
557 	 16-bit words.  */
558       for (i = (target_memregs+1)/2; i < 8; i++)
559 	{
560 	  fixed_regs[MEM0_REGNO + i] = 1;
561 	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
562 	}
563     }
564 
565   /* M32CM and M32C preserve more registers across function calls.  */
566   if (TARGET_A24)
567     {
568       call_used_regs[R1_REGNO] = 0;
569       call_used_regs[R2_REGNO] = 0;
570       call_used_regs[R3_REGNO] = 0;
571       call_used_regs[A0_REGNO] = 0;
572       call_used_regs[A1_REGNO] = 0;
573     }
574 }
575 
576 /* How Values Fit in Registers */
577 
578 /* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
579    different registers are different sizes from each other, *and* may
580    be different sizes in different chip families.  */
static int
m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
{
  /* The flags register only ever holds the condition code.  */
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudos get the generic word-count computation.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are uniform 16-bit words, so just round the size up.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  /* The remaining checks are ordered smallest-size first; keep that
     order, since e.g. the PSImode/A24 test must not shadow the
     byte/word cases above it.  A return of 0 means the mode doesn't
     fit in this register at all (see m32c_hard_regno_ok).  */
  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* On A16 parts, SImode in a0 spans the a0/a1 pair.  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
606 
607 int
608 m32c_hard_regno_nregs (int regno, machine_mode mode)
609 {
610   int rv = m32c_hard_regno_nregs_1 (regno, mode);
611   return rv ? rv : 1;
612 }
613 
614 /* Implements HARD_REGNO_MODE_OK.  The above function does the work
615    already; just test its return value.  */
616 int
617 m32c_hard_regno_ok (int regno, machine_mode mode)
618 {
619   return m32c_hard_regno_nregs_1 (regno, mode) != 0;
620 }
621 
622 /* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
623    registers are all different sizes.  However, since most modes are
624    bigger than our registers anyway, it's easier to implement this
625    function that way, leaving QImode as the only unique case.  */
626 int
627 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
628 {
629   if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
630     return 1;
631 
632 #if 0
633   if (m1 == QImode || m2 == QImode)
634     return 0;
635 #endif
636 
637   return 1;
638 }
639 
640 /* Register Classes */
641 
642 /* Implements REGNO_REG_CLASS.  */
643 enum reg_class
644 m32c_regno_reg_class (int regno)
645 {
646   switch (regno)
647     {
648     case R0_REGNO:
649       return R0_REGS;
650     case R1_REGNO:
651       return R1_REGS;
652     case R2_REGNO:
653       return R2_REGS;
654     case R3_REGNO:
655       return R3_REGS;
656     case A0_REGNO:
657       return A0_REGS;
658     case A1_REGNO:
659       return A1_REGS;
660     case SB_REGNO:
661       return SB_REGS;
662     case FB_REGNO:
663       return FB_REGS;
664     case SP_REGNO:
665       return SP_REGS;
666     case FLG_REGNO:
667       return FLG_REGS;
668     default:
669       if (IS_MEM_REGNO (regno))
670 	return MEM_REGS;
671       return ALL_REGS;
672     }
673 }
674 
675 /* Implements REGNO_OK_FOR_BASE_P.  */
676 int
677 m32c_regno_ok_for_base_p (int regno)
678 {
679   if (regno == A0_REGNO
680       || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
681     return 1;
682   return 0;
683 }
684 
685 /* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
686    registers of the appropriate size.  */
687 
688 #undef TARGET_PREFERRED_RELOAD_CLASS
689 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
690 
/* Return the preferred class for reloading X, starting from RCLASS.
   The result is RCLASS narrowed (via reduce_class) toward classes
   that can actually hold X's mode on this port.  */
static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG0
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* With no constraint at all, start from the general registers that
     match X's size.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  /* Control registers: QImode values must go via r0l/r1l (HL_REGS);
     other modes are left alone.  */
  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /*      newclass = HI_REGS; */
	  break;
	}
    }

  /* Otherwise widen the target class to match big values.  */
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  rclass = reduce_class (rclass, newclass, rclass);

  /* QImode always ends up restricted to the byte-capable registers.  */
  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG0
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
738 
739 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */
740 
741 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
742 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
743 
/* Output reloads use the same class preferences as input reloads;
   simply delegate.  */
static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
749 
750 /* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
751    address registers for reloads since they're needed for address
752    reloads.  */
753 int
754 m32c_limit_reload_class (machine_mode mode, int rclass)
755 {
756 #if DEBUG0
757   fprintf (stderr, "limit_reload_class for %s: %s ->",
758 	   mode_name[mode], class_names[rclass]);
759 #endif
760 
761   if (mode == QImode)
762     rclass = reduce_class (rclass, HL_REGS, rclass);
763   else if (mode == HImode)
764     rclass = reduce_class (rclass, HI_REGS, rclass);
765   else if (mode == SImode)
766     rclass = reduce_class (rclass, SI_REGS, rclass);
767 
768   if (rclass != A_REGS)
769     rclass = reduce_class (rclass, DI_REGS, rclass);
770 
771 #if DEBUG0
772   fprintf (stderr, " %s\n", class_names[rclass]);
773 #endif
774   return rclass;
775 }
776 
777 /* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
778    r0 or r1, as those are the only real QImode registers.  CR regs get
779    reloaded through appropriately sized general or address
780    registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  /* Bit mask of the hard registers in RCLASS.  */
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* A QImode memory reload into a class made up only of r2/r3
     (registers with no QImode access) needs a QI-capable scratch.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Moving a control register (sb..sp) into a CR class goes through
     a general register on A16 (or for HImode), an address register
     otherwise.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
799 
800 /* Implements TARGET_CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
801    reloads.  */
802 
803 #undef TARGET_CLASS_LIKELY_SPILLED_P
804 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
805 
806 static bool
807 m32c_class_likely_spilled_p (reg_class_t regclass)
808 {
809   if (regclass == A_REGS)
810     return true;
811 
812   return (reg_class_size[(int) regclass] == 1);
813 }
814 
815 /* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
816    documented meaning, to avoid potential inconsistencies with actual
817    class definitions.  */
818 
819 #undef TARGET_CLASS_MAX_NREGS
820 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
821 
822 static unsigned char
823 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
824 {
825   int rn;
826   unsigned char max = 0;
827 
828   for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
829     if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
830       {
831 	unsigned char n = m32c_hard_regno_nregs (rn, mode);
832 	if (max < n)
833 	  max = n;
834       }
835   return max;
836 }
837 
838 /* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
839    QI (r0l, r1l) because the chip doesn't support QI ops on other
840    registers (well, it does on a0/a1 but if we let gcc do that, reload
841    suffers).  Otherwise, we allow changes to larger modes.  */
int
m32c_cannot_change_mode_class (machine_mode from,
			       machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* Changing to QImode is forbidden if the class contains anything
     besides r0 (bit 0) and r1 (bit 2) -- 0x1ffa is all the other
     hard-register bits (see the nregs_table row order for the
     interleaved r0,r2,r1,r3 numbering).  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  /* r0/r1 (mask 0x0005) tolerate any change from a multi-byte mode;
     every other register tolerates changes from modes wider than two
     bytes.  */
  if (class_contents[rclass][0] & 0x0005	/* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
870 
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  The arg pointer counts as a pseudo here since it is
   eliminated before final register allocation.  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

/* TRUE if X is register a0 or any pseudo.  NOTE: assumes X is a REG;
   callers must have checked that (REGNO on a non-REG is invalid).  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
883 
884 /* Implements matching for constraints (see next function too).  'S' is
885    for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
886    call return values.  */
/* Test VALUE against one of the 'S'/'R' memory constraints.  The
   pattern strings below are produced by encode_pattern(); e.g. "m+ri"
   is a MEM of (PLUS reg const_int).  patternr[] indexes pick out the
   node behind a given pattern character.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-space memory: SImode register-indirect, a0/pseudo plus
       offset or symbol, or a bare symbol address.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      /* (PLUS (PLUS fb 0) ...) is also acceptable.  */
      if (RTX_IS ("m++rii"))
	{
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* SP-relative addresses are handled by the Ss constraint.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* Address-register indirect, with optional constant offset.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant (absolute or symbolic) addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* Stack-pointer relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* Frame-base relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* Static-base relative.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
969 
970 /* STACK AND CALLING */
971 
972 /* Frame Layout */
973 
974 /* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
975    (yes, THREE bytes) onto the stack for the return address, but we
976    don't support pointers bigger than 16 bits on those chips.  This
977    will likely wreak havoc with exception unwinding.  FIXME.  */
rtx
m32c_return_addr_rtx (int count)
{
  machine_mode mode;
  int offset;
  rtx ra_mem;

  /* Only the current frame's return address is supported.  */
  if (count)
    return NULL_RTX;
  /* we want 2[$fb] */

  if (TARGET_A24)
    {
      /* It's four bytes */
      mode = PSImode;
      offset = 4;
    }
  else
    {
      /* FIXME: it's really 3 bytes */
      mode = HImode;
      offset = 2;
    }

  /* Load the return address from its slot above the saved frame
     base, into a fresh register.  */
  ra_mem =
    gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
				      offset));
  return copy_to_mode_reg (mode, ra_mem);
}
1007 
1008 /* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
rtx
m32c_incoming_return_addr_rtx (void)
{
  /* we want [sp] -- at function entry the return address is still on
     top of the stack.  */
  return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
}
1015 
1016 /* Exception Handling Support */
1017 
1018 /* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
1019    pointers.  */
1020 int
1021 m32c_eh_return_data_regno (int n)
1022 {
1023   switch (n)
1024     {
1025     case 0:
1026       return MEM0_REGNO;
1027     case 1:
1028       return MEM0_REGNO+4;
1029     default:
1030       return INVALID_REGNUM;
1031     }
1032 }
1033 
1034 /* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
1035    m32c_emit_eh_epilogue.  */
1036 rtx
1037 m32c_eh_return_stackadj_rtx (void)
1038 {
1039   if (!cfun->machine->eh_stack_adjust)
1040     {
1041       rtx sa;
1042 
1043       sa = gen_rtx_REG (Pmode, R0_REGNO);
1044       cfun->machine->eh_stack_adjust = sa;
1045     }
1046   return cfun->machine->eh_stack_adjust;
1047 }
1048 
1049 /* Registers That Address the Stack Frame */
1050 
1051 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
1052    the original spec called for dwarf numbers to vary with register
1053    width as well, for example, r0l, r0, and r2r0 would each have
1054    different dwarf numbers.  GCC doesn't support this, and we don't do
1055    it, and gdb seems to like it this way anyway.  */
1056 unsigned int
1057 m32c_dwarf_frame_regnum (int n)
1058 {
1059   switch (n)
1060     {
1061     case R0_REGNO:
1062       return 5;
1063     case R1_REGNO:
1064       return 6;
1065     case R2_REGNO:
1066       return 7;
1067     case R3_REGNO:
1068       return 8;
1069     case A0_REGNO:
1070       return 9;
1071     case A1_REGNO:
1072       return 10;
1073     case FB_REGNO:
1074       return 11;
1075     case SB_REGNO:
1076       return 19;
1077 
1078     case SP_REGNO:
1079       return 12;
1080     case PC_REGNO:
1081       return 13;
1082     default:
1083       return DWARF_FRAME_REGISTERS + 1;
1084     }
1085 }
1086 
1087 /* The frame looks like this:
1088 
1089    ap -> +------------------------------
1090          | Return address (3 or 4 bytes)
1091 	 | Saved FB (2 or 4 bytes)
1092    fb -> +------------------------------
1093 	 | local vars
1094          | register saves fb
1095 	 |        through r0 as needed
1096    sp -> +------------------------------
1097 */
1098 
/* We use this to wrap all emitted insns in the prologue, marking them
   frame-related so the unwinder gets CFI for them.  Returns X for
   convenient chaining.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1106 
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* Hard register number.  */
  int bit;		/* This register's bit in the PUSHM/POPM mask.  */
  int a16_bytes;	/* Bytes pushed on A16 (R8C/M16C) parts.  */
  int a24_bytes;	/* Bytes pushed on A24 (M32C) parts.  */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in pushm_info.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1129 
1130 /* Returns TRUE if we need to save/restore the given register.  We
1131    save everything for exception handlers, so that any register can be
1132    unwound.  For interrupt handlers, we save everything if the handler
1133    calls something else (because we don't know what *that* function
1134    might do), but try to be a bit smarter if the handler is a leaf
1135    function.  We always save $a0, though, because we use that in the
1136    epilogue to copy $fb to $sp.  */
1137 static int
1138 need_to_save (int regno)
1139 {
1140   if (fixed_regs[regno])
1141     return 0;
1142   if (crtl->calls_eh_return)
1143     return 1;
1144   if (regno == FP_REGNO)
1145     return 0;
1146   if (cfun->machine->is_interrupt
1147       && (!cfun->machine->is_leaf
1148 	  || (regno == A0_REGNO
1149 	      && m32c_function_needs_enter ())
1150 	  ))
1151     return 1;
1152   if (df_regs_ever_live_p (regno)
1153       && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1154     return 1;
1155   return 0;
1156 }
1157 
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  Returns the total number of bytes the stack pointer
   moves for the save set.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;
  int byte_count = 0, bytes;
  int i;
  rtx dwarf_set[PUSHM_N];	/* One CFI SET per pushed register.  */
  int n_dwarfs = 0;
  int nosave_mask = 0;		/* PUSHM bits we must NOT save.  */

  /* When the function returns a value in registers (PARALLEL return
     rtx), keep the value-holding registers out of the save set so
     popm doesn't clobber the return value.  Mask bits follow
     pushm_info: r0=0x80 r1=0x40 r2=0x20 r3=0x10.
     NOTE(review): the branch sense here (>2 -> 0x20, else 0xf0) is
     inherited from upstream and looks odd against its own PSI/DF
     comments — verify against the ABI before changing.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Walk the PUSHM register list (nearest-to-sp first), accumulating
     the PUSHM/POPM bit mask and the total stack adjustment.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Describe, for the unwinder, where this register lands
	     relative to the post-push stack pointer.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (VOIDmode,
			 gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* For interrupt handlers the general-register PUSHM is emitted
     elsewhere (the mask is stashed in intr_pushm for that purpose —
     presumably the interrupt prologue; confirm against the rest of
     this file), so reset and count only the memregs below.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers must also preserve any live mem0..mem7
     pseudo-registers, two bytes each.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      /* One SET for the $sp adjustment plus one per saved register.  */
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (VOIDmode,
			   stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  /* Attach the full description so dwarf output reflects the
	     whole multi-register push as one unit.  */
	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* Memregs are pushed individually, after the pushm.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Restore in the reverse order of the pushes above.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1293 
1294 /* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
1295    diagrams our call frame.  */
1296 int
1297 m32c_initial_elimination_offset (int from, int to)
1298 {
1299   int ofs = 0;
1300 
1301   if (from == AP_REGNO)
1302     {
1303       if (TARGET_A16)
1304 	ofs += 5;
1305       else
1306 	ofs += 8;
1307     }
1308 
1309   if (to == SP_REGNO)
1310     {
1311       ofs += m32c_pushm_popm (PP_justcount);
1312       ofs += get_frame_size ();
1313     }
1314 
1315   /* Account for push rounding.  */
1316   if (TARGET_A24)
1317     ofs = (ofs + 1) & ~1;
1318 #if DEBUG0
1319   fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1320 	   to, ofs);
1321 #endif
1322   return ofs;
1323 }
1324 
1325 /* Passing Function Arguments on the Stack */
1326 
1327 /* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
1328    M32C has word stacks.  */
1329 unsigned int
1330 m32c_push_rounding (int n)
1331 {
1332   if (TARGET_R8C || TARGET_M16C)
1333     return n;
1334   return (n + 1) & ~1;
1335 }
1336 
1337 /* Passing Arguments in Registers */
1338 
1339 /* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
1340    registers, partly on stack.  If our function returns a struct, a
1341    pointer to a buffer for it is at the top of the stack (last thing
1342    pushed).  The first few real arguments may be in registers as
1343    follows:
1344 
1345    R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
1346 		arg2 in r2 if it's HI (else pushed on stack)
1347 		rest on stack
1348    M32C:        arg1 in r0 if it's QI or HI (else it's pushed on stack)
1349 		rest on stack
1350 
1351    Structs are not passed in registers, even if they fit.  Only
1352    integer and pointer types are passed in registers.
1353 
1354    Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1355    r2 if it fits.  */
1356 #undef TARGET_FUNCTION_ARG
1357 #define TARGET_FUNCTION_ARG m32c_function_arg
1358 static rtx
1359 m32c_function_arg (cumulative_args_t ca_v,
1360 		   machine_mode mode, const_tree type, bool named)
1361 {
1362   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1363 
1364   /* Can return a reg, parallel, or 0 for stack */
1365   rtx rv = NULL_RTX;
1366 #if DEBUG0
1367   fprintf (stderr, "func_arg %d (%s, %d)\n",
1368 	   ca->parm_num, mode_name[mode], named);
1369   debug_tree ((tree)type);
1370 #endif
1371 
1372   if (mode == VOIDmode)
1373     return GEN_INT (0);
1374 
1375   if (ca->force_mem || !named)
1376     {
1377 #if DEBUG0
1378       fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1379 	       named);
1380 #endif
1381       return NULL_RTX;
1382     }
1383 
1384   if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1385     return NULL_RTX;
1386 
1387   if (type && AGGREGATE_TYPE_P (type))
1388     return NULL_RTX;
1389 
1390   switch (ca->parm_num)
1391     {
1392     case 1:
1393       if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1394 	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1395       break;
1396 
1397     case 2:
1398       if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1399 	rv = gen_rtx_REG (mode, R2_REGNO);
1400       break;
1401     }
1402 
1403 #if DEBUG0
1404   debug_rtx (rv);
1405 #endif
1406   return rv;
1407 }
1408 
1409 #undef TARGET_PASS_BY_REFERENCE
1410 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1411 static bool
1412 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1413 			machine_mode mode ATTRIBUTE_UNUSED,
1414 			const_tree type ATTRIBUTE_UNUSED,
1415 			bool named ATTRIBUTE_UNUSED)
1416 {
1417   return 0;
1418 }
1419 
1420 /* Implements INIT_CUMULATIVE_ARGS.  */
1421 void
1422 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1423 			   tree fntype,
1424 			   rtx libname ATTRIBUTE_UNUSED,
1425 			   tree fndecl,
1426 			   int n_named_args ATTRIBUTE_UNUSED)
1427 {
1428   if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1429     ca->force_mem = 1;
1430   else
1431     ca->force_mem = 0;
1432   ca->parm_num = 1;
1433 }
1434 
1435 /* Implements TARGET_FUNCTION_ARG_ADVANCE.  force_mem is set for
1436    functions returning structures, so we always reset that.  Otherwise,
1437    we only need to know the sequence number of the argument to know what
1438    to do with it.  */
1439 #undef TARGET_FUNCTION_ARG_ADVANCE
1440 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1441 static void
1442 m32c_function_arg_advance (cumulative_args_t ca_v,
1443 			   machine_mode mode ATTRIBUTE_UNUSED,
1444 			   const_tree type ATTRIBUTE_UNUSED,
1445 			   bool named ATTRIBUTE_UNUSED)
1446 {
1447   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1448 
1449   if (ca->force_mem)
1450     ca->force_mem = 0;
1451   else
1452     ca->parm_num++;
1453 }
1454 
1455 /* Implements TARGET_FUNCTION_ARG_BOUNDARY.  */
1456 #undef TARGET_FUNCTION_ARG_BOUNDARY
1457 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1458 static unsigned int
1459 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1460 			    const_tree type ATTRIBUTE_UNUSED)
1461 {
1462   return (TARGET_A16 ? 8 : 16);
1463 }
1464 
1465 /* Implements FUNCTION_ARG_REGNO_P.  */
1466 int
1467 m32c_function_arg_regno_p (int r)
1468 {
1469   if (TARGET_A24)
1470     return (r == R0_REGNO);
1471   return (r == R1_REGNO || r == R2_REGNO);
1472 }
1473 
1474 /* HImode and PSImode are the two "native" modes as far as GCC is
1475    concerned, but the chips also support a 32-bit mode which is used
1476    for some opcodes in R8C/M16C and for reset vectors and such.  */
1477 #undef TARGET_VALID_POINTER_MODE
1478 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1479 static bool
1480 m32c_valid_pointer_mode (machine_mode mode)
1481 {
1482   if (mode == HImode
1483       || mode == PSImode
1484       || mode == SImode
1485       )
1486     return 1;
1487   return 0;
1488 }
1489 
1490 /* How Scalar Function Values Are Returned */
1491 
/* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
   combination of registers starting there (r2r0 for longs, r3r1r2r0
   for long long, r3r2r1r0 for doubles), except that that ABI
   currently doesn't work because it ends up using all available
   general registers and gcc often can't compile it.  So, instead, we
   return anything bigger than 16 bits in "mem0" (effectively, a
   memory location).  */

#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE m32c_libcall_value

static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Anything wider than 16 bits comes back in the mem0 pseudo-memory
     register; everything else in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1553 
1554 /* Implements TARGET_FUNCTION_VALUE.  Functions and libcalls have the same
1555    conventions.  */
1556 
1557 #undef TARGET_FUNCTION_VALUE
1558 #define TARGET_FUNCTION_VALUE m32c_function_value
1559 
1560 static rtx
1561 m32c_function_value (const_tree valtype,
1562 		     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1563 		     bool outgoing ATTRIBUTE_UNUSED)
1564 {
1565   /* return reg or parallel */
1566   const machine_mode mode = TYPE_MODE (valtype);
1567   return m32c_libcall_value (mode, NULL_RTX);
1568 }
1569 
1570 /* Implements TARGET_FUNCTION_VALUE_REGNO_P.  */
1571 
1572 #undef TARGET_FUNCTION_VALUE_REGNO_P
1573 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1574 
1575 static bool
1576 m32c_function_value_regno_p (const unsigned int regno)
1577 {
1578   return (regno == R0_REGNO || regno == MEM0_REGNO);
1579 }
1580 
1581 /* How Large Values Are Returned */
1582 
1583 /* We return structures by pushing the address on the stack, even if
1584    we use registers for the first few "real" arguments.  */
1585 #undef TARGET_STRUCT_VALUE_RTX
1586 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1587 static rtx
1588 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1589 		       int incoming ATTRIBUTE_UNUSED)
1590 {
1591   return 0;
1592 }
1593 
1594 /* Function Entry and Exit */
1595 
1596 /* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
1597 int
1598 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1599 {
1600   if (cfun->machine->is_interrupt)
1601     return 1;
1602   return 0;
1603 }
1604 
1605 /* Implementing the Varargs Macros */
1606 
1607 #undef TARGET_STRICT_ARGUMENT_NAMING
1608 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1609 static bool
1610 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1611 {
1612   return 1;
1613 }
1614 
1615 /* Trampolines for Nested Functions */
1616 
1617 /*
1618    m16c:
1619    1 0000 75C43412              mov.w   #0x1234,a0
1620    2 0004 FC000000              jmp.a   label
1621 
1622    m32c:
1623    1 0000 BC563412              mov.l:s #0x123456,a0
1624    2 0004 CC000000              jmp.a   label
1625 */
1626 
1627 /* Implements TRAMPOLINE_SIZE.  */
1628 int
1629 m32c_trampoline_size (void)
1630 {
1631   /* Allocate extra space so we can avoid the messy shifts when we
1632      initialize the trampoline; we just write past the end of the
1633      opcode.  */
1634   return TARGET_A16 ? 8 : 10;
1635 }
1636 
1637 /* Implements TRAMPOLINE_ALIGNMENT.  */
1638 int
1639 m32c_trampoline_alignment (void)
1640 {
1641   return 2;
1642 }
1643 
1644 /* Implements TARGET_TRAMPOLINE_INIT.  */
1645 
1646 #undef TARGET_TRAMPOLINE_INIT
1647 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1648 static void
1649 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1650 {
1651   rtx function = XEXP (DECL_RTL (fndecl), 0);
1652 
1653 #define A0(m,i) adjust_address (m_tramp, m, i)
1654   if (TARGET_A16)
1655     {
1656       /* Note: we subtract a "word" because the moves want signed
1657 	 constants, not unsigned constants.  */
1658       emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1659       emit_move_insn (A0 (HImode, 2), chainval);
1660       emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1661       /* We use 16-bit addresses here, but store the zero to turn it
1662 	 into a 24-bit offset.  */
1663       emit_move_insn (A0 (HImode, 5), function);
1664       emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1665     }
1666   else
1667     {
1668       /* Note that the PSI moves actually write 4 bytes.  Make sure we
1669 	 write stuff out in the right order, and leave room for the
1670 	 extra byte at the end.  */
1671       emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1672       emit_move_insn (A0 (PSImode, 1), chainval);
1673       emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1674       emit_move_insn (A0 (PSImode, 5), function);
1675     }
1676 #undef A0
1677 }
1678 
1679 /* Addressing Modes */
1680 
/* The r8c/m32c family supports a wide range of non-orthogonal
   addressing modes, including the ability to double-indirect on *some*
   of them.  Not all insns support all modes, either, but we rely on
   predicates and constraints to deal with that.  */
#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  /* Reject addresses whose own mode doesn't match the cpu family's
     pointer widths.  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-inc/dec addressing is only allowed on $sp.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /*    case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
1814 
1815 /* Implements REG_OK_FOR_BASE_P.  */
1816 int
1817 m32c_reg_ok_for_base_p (rtx x, int strict)
1818 {
1819   if (GET_CODE (x) != REG)
1820     return 0;
1821   switch (REGNO (x))
1822     {
1823     case A0_REGNO:
1824     case A1_REGNO:
1825     case SB_REGNO:
1826     case FB_REGNO:
1827     case SP_REGNO:
1828       return 1;
1829     default:
1830       if (IS_PSEUDO (x, strict))
1831 	return 1;
1832       return 0;
1833     }
1834 }
1835 
1836 /* We have three choices for choosing fb->aN offsets.  If we choose -128,
1837    we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1838    like this:
1839        EB 4B FF    mova    -128[$fb],$a0
1840        D8 0C FF FF mov.w:Q #0,-1[$a0]
1841 
1842    Alternately, we subtract the frame size, and hopefully use 8-bit aN
1843    displacements:
1844        7B F4       stc $fb,$a0
1845        77 54 00 01 sub #256,$a0
1846        D8 08 01    mov.w:Q #0,1[$a0]
1847 
1848    If we don't offset (i.e. offset by zero), we end up with:
1849        7B F4       stc $fb,$a0
1850        D8 0C 00 FF mov.w:Q #0,-256[$a0]
1851 
1852    We have to subtract *something* so that we have a PLUS rtx to mark
1853    that we've done this reload.  The -128 offset will never result in
1854    an 8-bit aN offset, and the payoff for the second case is five
1855    loads *if* those loads are within 256 bytes of the other end of the
1856    frame, so the third case seems best.  Note that we subtract the
1857    zero, but detect that in the addhi3 pattern.  */
1858 
/* Zero on purpose; see the discussion above — we subtract it only so
   a PLUS rtx marks the reload, which the addhi3 pattern detects.  */
#define BIG_FB_ADJ 0
1860 
/* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
   worry about is frame base offsets, as $fb has a limited
   displacement range.  We deal with this by attempting to reload $fb
   itself into an address register; that seems to result in the best
   code.  */
#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
static rtx
m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 machine_mode mode)
{
#if DEBUG0
  fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
  debug_rtx (x);
  fprintf (stderr, "\n");
#endif

  /* ($fb + d) where d falls outside $fb's signed 8-bit displacement
     window for this mode's width.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) == FB_REGNO
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) < -128
	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      /* reload FB to A_REGS */
      rtx temp = gen_reg_rtx (Pmode);
      /* Copy before mutating: X may be shared with other uses.  */
      x = copy_rtx (x);
      emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
      XEXP (x, 0) = temp;
    }

  return x;
}
1894 
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  Returns
   nonzero when it pushed a reload itself, zero to let gcc do the
   default reload.  */
int
m32c_legitimize_reload_address (rtx * x,
				machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* ($fb + d) with d outside $fb's signed 8-bit window: reload
     ($fb + BIG_FB_ADJ) into an address register and rewrite the
     address as (reg + remaining displacement).  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Already-nested (($fb + c1) + c2): reload the inner sum into an
     address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* PSImode addresses on A24 parts must live in address registers.  */
  if (TARGET_A24 && GET_MODE (*x) == PSImode)
    {
      push_reload (*x, NULL_RTX, x, NULL,
		   A_REGS, PSImode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
1965 
1966 /* Return the appropriate mode for a named address pointer.  */
1967 #undef TARGET_ADDR_SPACE_POINTER_MODE
1968 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1969 static machine_mode
1970 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1971 {
1972   switch (addrspace)
1973     {
1974     case ADDR_SPACE_GENERIC:
1975       return TARGET_A24 ? PSImode : HImode;
1976     case ADDR_SPACE_FAR:
1977       return SImode;
1978     default:
1979       gcc_unreachable ();
1980     }
1981 }
1982 
1983 /* Return the appropriate mode for a named address address.  */
1984 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1985 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1986 static machine_mode
1987 m32c_addr_space_address_mode (addr_space_t addrspace)
1988 {
1989   switch (addrspace)
1990     {
1991     case ADDR_SPACE_GENERIC:
1992       return TARGET_A24 ? PSImode : HImode;
1993     case ADDR_SPACE_FAR:
1994       return SImode;
1995     default:
1996       gcc_unreachable ();
1997     }
1998 }
1999 
/* Like m32c_legitimate_address_p, except with named addresses.  */
#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
  m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* Far addresses are only meaningful on A16 parts; A24 parts
	 reject them here.  */
      if (TARGET_A24)
	return 0;
      encode_pattern (x);
      /* Bare register: must be SImode, and either $a0 or (depending
	 on STRICT) a pseudo.  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* "+^Sri": wrapped HImode base register plus a constant
	 displacement (the "^S" prefix is whatever encode_pattern
	 emits for the far-pointer wrapper).  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      /* Displacements must stay within the 20-bit range.  */
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* "+^Srs": wrapped HImode base register plus a symbol.  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* "+^S+ris": wrapped (base register + constant) plus a
	 symbol.  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* Bare symbol references are always fine.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic address space: defer to the ordinary rules.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2089 
2090 /* Like m32c_legitimate_address, except with named address support.  */
2091 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2092 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2093 static rtx
2094 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2095 				    addr_space_t as)
2096 {
2097   if (as != ADDR_SPACE_GENERIC)
2098     {
2099 #if DEBUG0
2100       fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2101       debug_rtx (x);
2102       fprintf (stderr, "\n");
2103 #endif
2104 
2105       if (GET_CODE (x) != REG)
2106 	{
2107 	  x = force_reg (SImode, x);
2108 	}
2109       return x;
2110     }
2111 
2112   return m32c_legitimize_address (x, oldx, mode);
2113 }
2114 
2115 /* Determine if one named address space is a subset of another.  */
2116 #undef TARGET_ADDR_SPACE_SUBSET_P
2117 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2118 static bool
2119 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2120 {
2121   gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2122   gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2123 
2124   if (subset == superset)
2125     return true;
2126 
2127   else
2128     return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2129 }
2130 
2131 #undef TARGET_ADDR_SPACE_CONVERT
2132 #define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
2133 /* Convert from one address space to another.  */
2134 static rtx
2135 m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
2136 {
2137   addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
2138   addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
2139   rtx result;
2140 
2141   gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
2142   gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);
2143 
2144   if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
2145     {
2146       /* This is unpredictable, as we're truncating off usable address
2147 	 bits.  */
2148 
2149       result = gen_reg_rtx (HImode);
2150       emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
2151       return result;
2152     }
2153   else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
2154     {
2155       /* This always works.  */
2156       result = gen_reg_rtx (SImode);
2157       emit_insn (gen_zero_extendhisi2 (result, op));
2158       return result;
2159     }
2160   else
2161     gcc_unreachable ();
2162 }
2163 
2164 /* Condition Code Status */
2165 
2166 #undef TARGET_FIXED_CONDITION_CODE_REGS
2167 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2168 static bool
2169 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2170 {
2171   *p1 = FLG_REGNO;
2172   *p2 = INVALID_REGNUM;
2173   return true;
2174 }
2175 
/* Describing Relative Costs of Operations */

/* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
   prohibitively expensive, like trying to put QIs in r2/r3 (there are
   no opcodes to do that).  We also discourage use of mem* registers
   since they're really memory.  */

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost

static int
m32c_register_move_cost (machine_mode mode, reg_class_t from,
			 reg_class_t to)
{
  /* Baseline cost for any register-to-register move.  */
  int cost = COSTS_N_INSNS (3);
  HARD_REG_SET cc;

/* FIXME: pick real values, but not 2 for now.  */
  /* CC is the union of both classes' hard registers.  */
  COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
  IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);

  /* QImode moves touching r2/r3 are impossible (no opcode): almost
     forbid them when only r2/r3 are involved, heavily penalize
     otherwise.  */
  if (mode == QImode
      && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
    {
      if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
	cost = COSTS_N_INSNS (1000);
      else
	cost = COSTS_N_INSNS (80);
    }

  /* A class that cannot hold the mode at all gets a prohibitive
     cost.  */
  if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
    cost = COSTS_N_INSNS (1000);

  /* Control registers are slow to move through.  */
  if (reg_classes_intersect_p (from, CR_REGS))
    cost += COSTS_N_INSNS (5);

  if (reg_classes_intersect_p (to, CR_REGS))
    cost += COSTS_N_INSNS (5);

  /* mem* "registers" are really memory: heavy penalty when the move
     is definitely through them, lighter when it only might be.  */
  if (from == MEM_REGS || to == MEM_REGS)
    cost += COSTS_N_INSNS (50);
  else if (reg_classes_intersect_p (from, MEM_REGS)
	   || reg_classes_intersect_p (to, MEM_REGS))
    cost += COSTS_N_INSNS (10);

#if DEBUG0
  fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
	   mode_name[mode], class_names[(int) from], class_names[(int) to],
	   cost);
#endif
  return cost;
}
2228 
/*  Implements TARGET_MEMORY_MOVE_COST.  A flat placeholder cost for
    any memory access, independent of mode, class and direction.  */

#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost

static int
m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
		       reg_class_t rclass ATTRIBUTE_UNUSED,
		       bool in ATTRIBUTE_UNUSED)
{
  /* FIXME: pick real values.  */
  return COSTS_N_INSNS (10);
}
2242 
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  Returns true when *TOTAL is
   final, false to let the caller recurse into sub-expressions.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case REG:
      /* mem0..mem7 are memory-backed pseudo-registers; make them very
	 unattractive.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* Variable shift count: one insn to stage the count.  */
	  /* mov.b r1l, r1h */
	  *total +=  COSTS_N_INSNS (1);
	  return true;
	}
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* Counts outside [-8, 8] need an extra staging move.  */
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total +=  COSTS_N_INSNS (2);
	  return true;
	}
      /* Small constant shifts add nothing beyond the operands.  */
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* Storing a comparison result takes two insns; bare
	 comparisons fall through to the default costing.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	/* NOTE(review): this assumes the extracted operand is a MEM
	   whose address is XEXP (dest, 0); confirm the insn patterns
	   guarantee that shape before relying on it.  */
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  SImode data needs register pairs on
	 16-bit parts, so charge extra for it there.  */
      if (TARGET_A16 && GET_MODE(x) == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2324 
2325 #undef TARGET_ADDRESS_COST
2326 #define TARGET_ADDRESS_COST m32c_address_cost
2327 static int
2328 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2329 		   addr_space_t as ATTRIBUTE_UNUSED,
2330 		   bool speed ATTRIBUTE_UNUSED)
2331 {
2332   int i;
2333   /*  fprintf(stderr, "\naddress_cost\n");
2334       debug_rtx(addr);*/
2335   switch (GET_CODE (addr))
2336     {
2337     case CONST_INT:
2338       i = INTVAL (addr);
2339       if (i == 0)
2340 	return COSTS_N_INSNS(1);
2341       if (0 < i && i <= 255)
2342 	return COSTS_N_INSNS(2);
2343       if (0 < i && i <= 65535)
2344 	return COSTS_N_INSNS(3);
2345       return COSTS_N_INSNS(4);
2346     case SYMBOL_REF:
2347       return COSTS_N_INSNS(4);
2348     case REG:
2349       return COSTS_N_INSNS(1);
2350     case PLUS:
2351       if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2352 	{
2353 	  i = INTVAL (XEXP (addr, 1));
2354 	  if (i == 0)
2355 	    return COSTS_N_INSNS(1);
2356 	  if (0 < i && i <= 255)
2357 	    return COSTS_N_INSNS(2);
2358 	  if (0 < i && i <= 65535)
2359 	    return COSTS_N_INSNS(3);
2360 	}
2361       return COSTS_N_INSNS(4);
2362     default:
2363       return 0;
2364     }
2365 }
2366 
2367 /* Defining the Output Assembler Language */
2368 
2369 /* Output of Data */
2370 
2371 /* We may have 24 bit sizes, which is the native address size.
2372    Currently unused, but provided for completeness.  */
2373 #undef TARGET_ASM_INTEGER
2374 #define TARGET_ASM_INTEGER m32c_asm_integer
2375 static bool
2376 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2377 {
2378   switch (size)
2379     {
2380     case 3:
2381       fprintf (asm_out_file, "\t.3byte\t");
2382       output_addr_const (asm_out_file, x);
2383       fputc ('\n', asm_out_file);
2384       return true;
2385     case 4:
2386       if (GET_CODE (x) == SYMBOL_REF)
2387 	{
2388 	  fprintf (asm_out_file, "\t.long\t");
2389 	  output_addr_const (asm_out_file, x);
2390 	  fputc ('\n', asm_out_file);
2391 	  return true;
2392 	}
2393       break;
2394     }
2395   return default_assemble_integer (x, size, aligned_p);
2396 }
2397 
/* Output of Assembler Instructions */

/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Each entry maps an (operand-modifier CODE, encoded operand PATTERN)
   pair to an output FORMAT string.  In the format, a digit N means
   "print patternr[N]", 'z' inserts a zero displacement when the base
   register requires one, '+' requests sign-blending of a following
   constant, and backslash quotes the next character; everything else
   is emitted literally.  Consumed by m32c_print_operand.  */
static struct
{
  char code;
  char const *pattern;
  char const *format;
}
const conversions[] = {
  { 0, "r", "0" },

  /* Memory operands.  */
  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  /* Doubly-indirect memory operands.  */
  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  /* Immediates and symbols.  */
  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  /* Modifier-specific entries.  */
  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2454 
/* This is in order according to the bitfield that pushm/popm use:
   index B names the register selected by bit (1 << B) of the mask,
   so bit 7 is r0 and bit 0 is fb.  See the 'p' case in
   m32c_print_operand.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2459 
/* Implements TARGET_PRINT_OPERAND.  Output operand X to FILE under
   control of CODE: zero for default output, or one of the
   single-character modifiers handled below ('u', 'U', '!', '&', 'C',
   'c', 'h', 'H', 'x', 'X', 'b', 'B', 'p', 'm', 'd', 'D', 'l').
   After the modifier-specific preprocessing, output is table-driven
   through `conversions' above.  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND m32c_print_operand

static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
   we need unsigned, so 'u' and 'U' tell us what size unsigned we
   need.  */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  'h' selects the low half, 'H' the high half.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Find the first `conversions' entry matching CODE and the encoded
     shape of X, then walk its format string.  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			/* Single-bit masks (direct, or inverted at
			   word or byte width) print as a bit
			   position.  */
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table entry matched; complain (to stderr and into the asm
     stream) rather than crash.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
2817 
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.

   See m32c_print_operand above for descriptions of what these do.  */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  /* '&' selects the PSImode size suffix; '!' is the debug trap.  */
  return c == '&' || c == '!';
}
2833 
2834 /* Implements TARGET_PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */
2835 
2836 #undef TARGET_PRINT_OPERAND_ADDRESS
2837 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2838 
2839 static void
2840 m32c_print_operand_address (FILE * stream, rtx address)
2841 {
2842   if (GET_CODE (address) == MEM)
2843     address = XEXP (address, 0);
2844   else
2845     /* cf: gcc.dg/asm-4.c.  */
2846     gcc_assert (GET_CODE (address) == REG);
2847 
2848   m32c_print_operand (stream, address, 0);
2849 }
2850 
2851 /* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
2852    differently than general registers.  */
2853 void
2854 m32c_output_reg_push (FILE * s, int regno)
2855 {
2856   if (regno == FLG_REGNO)
2857     fprintf (s, "\tpushc\tflg\n");
2858   else
2859     fprintf (s, "\tpush.%c\t%s\n",
2860 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2861 }
2862 
2863 /* Likewise for ASM_OUTPUT_REG_POP.  */
2864 void
2865 m32c_output_reg_pop (FILE * s, int regno)
2866 {
2867   if (regno == FLG_REGNO)
2868     fprintf (s, "\tpopc\tflg\n");
2869   else
2870     fprintf (s, "\tpop.%c\t%s\n",
2871 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2872 }
2873 
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below.  Find the attributes wherever
   they may be: on a type directly, on a decl, or on the decl's type.
   The whole expansion is parenthesized so the conditional expression
   cannot be torn apart by operators at the use site.  */
#define M32C_ATTRIBUTES(decl)			\
  ((TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl)	\
   : DECL_ATTRIBUTES (decl)			\
     ? (DECL_ATTRIBUTES (decl))			\
     : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
2883 
2884 /* Returns TRUE if the given tree has the "interrupt" attribute.  */
2885 static int
2886 interrupt_p (tree node ATTRIBUTE_UNUSED)
2887 {
2888   tree list = M32C_ATTRIBUTES (node);
2889   while (list)
2890     {
2891       if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2892 	return 1;
2893       list = TREE_CHAIN (list);
2894     }
2895   return fast_interrupt_p (node);
2896 }
2897 
2898 /* Returns TRUE if the given tree has the "bank_switch" attribute.  */
2899 static int
2900 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2901 {
2902   tree list = M32C_ATTRIBUTES (node);
2903   while (list)
2904     {
2905       if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2906 	return 1;
2907       list = TREE_CHAIN (list);
2908     }
2909   return 0;
2910 }
2911 
2912 /* Returns TRUE if the given tree has the "fast_interrupt" attribute.  */
2913 static int
2914 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2915 {
2916   tree list = M32C_ATTRIBUTES (node);
2917   while (list)
2918     {
2919       if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2920 	return 1;
2921       list = TREE_CHAIN (list);
2922     }
2923   return 0;
2924 }
2925 
/* Attribute handler shared by the "interrupt", "bank_switch" and
   "fast_interrupt" attributes.  No validation is needed here -- the
   mere presence of the attribute is what matters (see interrupt_p
   and friends) -- so this accepts unconditionally.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2935 
2936 /* Returns TRUE if given tree has the "function_vector" attribute. */
2937 int
2938 m32c_special_page_vector_p (tree func)
2939 {
2940   tree list;
2941 
2942   if (TREE_CODE (func) != FUNCTION_DECL)
2943     return 0;
2944 
2945   list = M32C_ATTRIBUTES (func);
2946   while (list)
2947     {
2948       if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2949         return 1;
2950       list = TREE_CHAIN (list);
2951     }
2952   return 0;
2953 }
2954 
2955 static tree
2956 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2957                          tree name ATTRIBUTE_UNUSED,
2958                          tree args ATTRIBUTE_UNUSED,
2959                          int flags ATTRIBUTE_UNUSED,
2960                          bool * no_add_attrs ATTRIBUTE_UNUSED)
2961 {
2962   if (TARGET_R8C)
2963     {
2964       /* The attribute is not supported for R8C target.  */
2965       warning (OPT_Wattributes,
2966                 "%qE attribute is not supported for R8C target",
2967                 name);
2968       *no_add_attrs = true;
2969     }
2970   else if (TREE_CODE (*node) != FUNCTION_DECL)
2971     {
2972       /* The attribute must be applied to functions only.  */
2973       warning (OPT_Wattributes,
2974                 "%qE attribute applies only to functions",
2975                 name);
2976       *no_add_attrs = true;
2977     }
2978   else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2979     {
2980       /* The argument must be a constant integer.  */
2981       warning (OPT_Wattributes,
2982                 "%qE attribute argument not an integer constant",
2983                 name);
2984       *no_add_attrs = true;
2985     }
2986   else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2987            || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2988     {
2989       /* The argument value must be between 18 to 255.  */
2990       warning (OPT_Wattributes,
2991                 "%qE attribute argument should be between 18 to 255",
2992                 name);
2993       *no_add_attrs = true;
2994     }
2995   return NULL_TREE;
2996 }
2997 
2998 /* If the function is assigned the attribute 'function_vector', it
2999    returns the function vector number, otherwise returns zero.  */
3000 int
3001 current_function_special_page_vector (rtx x)
3002 {
3003   int num;
3004 
3005   if ((GET_CODE(x) == SYMBOL_REF)
3006       && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
3007     {
3008       tree list;
3009       tree t = SYMBOL_REF_DECL (x);
3010 
3011       if (TREE_CODE (t) != FUNCTION_DECL)
3012         return 0;
3013 
3014       list = M32C_ATTRIBUTES (t);
3015       while (list)
3016         {
3017           if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
3018             {
3019               num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
3020               return num;
3021             }
3022 
3023           list = TREE_CHAIN (list);
3024         }
3025 
3026       return 0;
3027     }
3028   else
3029     return 0;
3030 }
3031 
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* Fields: { name, min_args, max_args, decl_required, type_required,
   function_type_required, handler, affects_type_identity }.  The
   interrupt-flavored attributes take no arguments and share the
   accept-everything handler; "function_vector" takes exactly one
   argument, validated by function_vector_handler.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true,  false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
3042 
#undef TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
/* Implements TARGET_COMP_TYPE_ATTRIBUTES.  All attribute combinations
   are treated as compatible, with no warning.  */
static int
m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
			   const_tree type2 ATTRIBUTE_UNUSED)
{
  /* 0=incompatible 1=compatible 2=warning */
  return 1;
}
3052 
3053 #undef TARGET_INSERT_ATTRIBUTES
3054 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3055 static void
3056 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3057 			tree * attr_ptr ATTRIBUTE_UNUSED)
3058 {
3059   unsigned addr;
3060   /* See if we need to make #pragma address variables volatile.  */
3061 
3062   if (TREE_CODE (node) == VAR_DECL)
3063     {
3064       const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3065       if (m32c_get_pragma_address  (name, &addr))
3066 	{
3067 	  TREE_THIS_VOLATILE (node) = true;
3068 	}
3069     }
3070 }
3071 
3072 
3073 struct pragma_traits : default_hashmap_traits
3074 {
3075   static hashval_t hash (const char *str) { return htab_hash_string (str); }
3076   static bool
3077   equal_keys (const char *a, const char *b)
3078   {
3079     return !strcmp (a, b);
3080   }
3081 };
3082 
/* Hash table of pragma info: maps a variable name to the address
   given in its #pragma address directive.  GTY-rooted so the keys
   (ggc_strdup'd) survive garbage collection.  */
static GTY(()) hash_map<const char *, unsigned, pragma_traits> *pragma_htab;
3085 
3086 void
3087 m32c_note_pragma_address (const char *varname, unsigned address)
3088 {
3089   if (!pragma_htab)
3090     pragma_htab
3091       = hash_map<const char *, unsigned, pragma_traits>::create_ggc (31);
3092 
3093   const char *name = ggc_strdup (varname);
3094   unsigned int *slot = &pragma_htab->get_or_insert (name);
3095   *slot = address;
3096 }
3097 
3098 static bool
3099 m32c_get_pragma_address (const char *varname, unsigned *address)
3100 {
3101   if (!pragma_htab)
3102     return false;
3103 
3104   unsigned int *slot = pragma_htab->get (varname);
3105   if (slot)
3106     {
3107       *address = *slot;
3108       return true;
3109     }
3110   return false;
3111 }
3112 
/* Emit assembly for a common symbol NAME of SIZE bytes, aligned to
   ALIGN bits.  GLOBAL is false for file-local commons.  A variable
   fixed by #pragma address is emitted as an absolute assembler
   equate instead of a common, and is never exported.  */
void
m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
			    const char *name,
			    int size, int align, int global)
{
  unsigned address;

  if (m32c_get_pragma_address (name, &address))
    {
      /* We never output these as global.  */
      assemble_name (stream, name);
      fprintf (stream, " = 0x%04x\n", address);
      return;
    }
  if (!global)
    {
      fprintf (stream, "\t.local\t");
      assemble_name (stream, name);
      fprintf (stream, "\n");
    }
  fprintf (stream, "\t.comm\t");
  assemble_name (stream, name);
  /* .comm takes the size in bytes and the alignment in bytes, so
     convert ALIGN from bits.  */
  fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
}
3137 
3138 /* Predicates */
3139 
/* This is a list of legal subregs of hard regs.  Each entry permits
   a subreg of OUTER_MODE_SIZE bytes taken from hard register REGNO
   whose full mode is INNER_MODE_SIZE bytes, at the byte offsets
   whose bits are set in BYTE_MASK (bit N = offset N is legal).
   LEGAL_WHEN gates the entry on the target: 1 = always legal,
   16 = only with 16-bit addressing (TARGET_A16), 24 = only with
   24-bit addressing (TARGET_A24).  Consumed by
   m32c_illegal_subreg_p below.  */
static const struct {
  unsigned char outer_mode_size;
  unsigned char inner_mode_size;
  unsigned char byte_mask;
  unsigned char legal_when;
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
};
3164 
3165 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3166    support.  We also bail on MEMs with illegal addresses.  */
3167 bool
3168 m32c_illegal_subreg_p (rtx op)
3169 {
3170   int offset;
3171   unsigned int i;
3172   machine_mode src_mode, dest_mode;
3173 
3174   if (GET_CODE (op) == MEM
3175       && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3176     {
3177       return true;
3178     }
3179 
3180   if (GET_CODE (op) != SUBREG)
3181     return false;
3182 
3183   dest_mode = GET_MODE (op);
3184   offset = SUBREG_BYTE (op);
3185   op = SUBREG_REG (op);
3186   src_mode = GET_MODE (op);
3187 
3188   if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3189     return false;
3190   if (GET_CODE (op) != REG)
3191     return false;
3192   if (REGNO (op) >= MEM0_REGNO)
3193     return false;
3194 
3195   offset = (1 << offset);
3196 
3197   for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3198     if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3199 	&& legal_subregs[i].regno == REGNO (op)
3200 	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3201 	&& legal_subregs[i].byte_mask & offset)
3202       {
3203 	switch (legal_subregs[i].legal_when)
3204 	  {
3205 	  case 1:
3206 	    return false;
3207 	  case 16:
3208 	    if (TARGET_A16)
3209 	      return false;
3210 	    break;
3211 	  case 24:
3212 	    if (TARGET_A24)
3213 	      return false;
3214 	    break;
3215 	  }
3216       }
3217   return true;
3218 }
3219 
3220 /* Returns TRUE if we support a move between the first two operands.
3221    At the moment, we just want to discourage mem to mem moves until
3222    after reload, because reload has a hard time with our limited
3223    number of address registers, and we can get into a situation where
3224    we need three of them when we only have two.  */
3225 bool
3226 m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
3227 {
3228   rtx op0 = operands[0];
3229   rtx op1 = operands[1];
3230 
3231   if (TARGET_A24)
3232     return true;
3233 
3234 #define DEBUG_MOV_OK 0
3235 #if DEBUG_MOV_OK
3236   fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3237   debug_rtx (op0);
3238   debug_rtx (op1);
3239 #endif
3240 
3241   if (GET_CODE (op0) == SUBREG)
3242     op0 = XEXP (op0, 0);
3243   if (GET_CODE (op1) == SUBREG)
3244     op1 = XEXP (op1, 0);
3245 
3246   if (GET_CODE (op0) == MEM
3247       && GET_CODE (op1) == MEM
3248       && ! reload_completed)
3249     {
3250 #if DEBUG_MOV_OK
3251       fprintf (stderr, " - no, mem to mem\n");
3252 #endif
3253       return false;
3254     }
3255 
3256 #if DEBUG_MOV_OK
3257   fprintf (stderr, " - ok\n");
3258 #endif
3259   return true;
3260 }
3261 
3262 /* Returns TRUE if two consecutive HImode mov instructions, generated
3263    for moving an immediate double data to a double data type variable
3264    location, can be combined into single SImode mov instruction.  */
3265 bool
3266 m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
3267 		   machine_mode mode ATTRIBUTE_UNUSED)
3268 {
3269   /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3270      flags.  */
3271   return false;
3272 }
3273 
3274 /* Expanders */
3275 
3276 /* Subregs are non-orthogonal for us, because our registers are all
3277    different sizes.  */
3278 static rtx
3279 m32c_subreg (machine_mode outer,
3280 	     rtx x, machine_mode inner, int byte)
3281 {
3282   int r, nr = -1;
3283 
3284   /* Converting MEMs to different types that are the same size, we
3285      just rewrite them.  */
3286   if (GET_CODE (x) == SUBREG
3287       && SUBREG_BYTE (x) == 0
3288       && GET_CODE (SUBREG_REG (x)) == MEM
3289       && (GET_MODE_SIZE (GET_MODE (x))
3290 	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3291     {
3292       rtx oldx = x;
3293       x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3294       MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3295     }
3296 
3297   /* Push/pop get done as smaller push/pops.  */
3298   if (GET_CODE (x) == MEM
3299       && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3300 	  || GET_CODE (XEXP (x, 0)) == POST_INC))
3301     return gen_rtx_MEM (outer, XEXP (x, 0));
3302   if (GET_CODE (x) == SUBREG
3303       && GET_CODE (XEXP (x, 0)) == MEM
3304       && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3305 	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3306     return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3307 
3308   if (GET_CODE (x) != REG)
3309     {
3310       rtx r = simplify_gen_subreg (outer, x, inner, byte);
3311       if (GET_CODE (r) == SUBREG
3312 	  && GET_CODE (x) == MEM
3313 	  && MEM_VOLATILE_P (x))
3314 	{
3315 	  /* Volatile MEMs don't get simplified, but we need them to
3316 	     be.  We are little endian, so the subreg byte is the
3317 	     offset.  */
3318 	  r = adjust_address_nv (x, outer, byte);
3319 	}
3320       return r;
3321     }
3322 
3323   r = REGNO (x);
3324   if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3325     return simplify_gen_subreg (outer, x, inner, byte);
3326 
3327   if (IS_MEM_REGNO (r))
3328     return simplify_gen_subreg (outer, x, inner, byte);
3329 
3330   /* This is where the complexities of our register layout are
3331      described.  */
3332   if (byte == 0)
3333     nr = r;
3334   else if (outer == HImode)
3335     {
3336       if (r == R0_REGNO && byte == 2)
3337 	nr = R2_REGNO;
3338       else if (r == R0_REGNO && byte == 4)
3339 	nr = R1_REGNO;
3340       else if (r == R0_REGNO && byte == 6)
3341 	nr = R3_REGNO;
3342       else if (r == R1_REGNO && byte == 2)
3343 	nr = R3_REGNO;
3344       else if (r == A0_REGNO && byte == 2)
3345 	nr = A1_REGNO;
3346     }
3347   else if (outer == SImode)
3348     {
3349       if (r == R0_REGNO && byte == 0)
3350 	nr = R0_REGNO;
3351       else if (r == R0_REGNO && byte == 4)
3352 	nr = R1_REGNO;
3353     }
3354   if (nr == -1)
3355     {
3356       fprintf (stderr, "m32c_subreg %s %s %d\n",
3357 	       mode_name[outer], mode_name[inner], byte);
3358       debug_rtx (x);
3359       gcc_unreachable ();
3360     }
3361   return gen_rtx_REG (outer, nr);
3362 }
3363 
/* Used to emit move instructions.  We split some moves,
   and avoid mem-mem moves.  Returns nonzero if the move was fully
   emitted here (so the caller's pattern should not emit one), zero
   if the (possibly adjusted) operands should be used normally.  */
int
m32c_prepare_move (rtx * operands, machine_mode mode)
{
  /* Constants cannot be stored directly into a far address space;
     load them into a register first.  */
  if (far_addr_space_p (operands[0])
      && CONSTANT_P (operands[1]))
    {
      operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
    }
  /* The 16-bit chips have no PSImode moves; split into pieces.  */
  if (TARGET_A16 && mode == PSImode)
    return m32c_split_move (operands, mode, 1);
  /* Lower a PRE_MODIFY destination into an explicit address update
     followed by a plain register-indirect store.  */
  if ((GET_CODE (operands[0]) == MEM)
      && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
    {
      rtx pmv = XEXP (operands[0], 0);
      rtx dest_reg = XEXP (pmv, 0);
      rtx dest_mod = XEXP (pmv, 1);

      emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
      operands[0] = gen_rtx_MEM (mode, dest_reg);
    }
  /* Avoid mem-to-mem moves while we can still create pseudos.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    operands[1] = copy_to_mode_reg (mode, operands[1]);
  return 0;
}
3390 
/* Set to 1 for verbose dumps from the move-splitting routines.  */
#define DEBUG_SPLIT 0

/* Returns TRUE if the given PSImode move should be split.  We split
   for all r8c/m16c moves, since it doesn't support them, and for
   POP.L as we can only *push* SImode.  */
int
m32c_split_psi_p (rtx * operands)
{
#if DEBUG_SPLIT
  fprintf (stderr, "\nm32c_split_psi_p\n");
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif
  /* The 16-bit chips have no PSImode moves at all.  */
  if (TARGET_A16)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, A16\n");
#endif
      return 1;
    }
  /* A POST_INC source is a pop; since only pushes exist in SImode,
     pops must be split.  */
  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    {
#if DEBUG_SPLIT
      fprintf (stderr, "yes, pop.l\n");
#endif
      return 1;
    }
#if DEBUG_SPLIT
  fprintf (stderr, "no, default\n");
#endif
  return 0;
}
3424 
/* Split the given move.  SPLIT_ALL is 0 if splitting is optional
   (define_expand), 1 if it is not optional (define_insn_and_split),
   and 3 for define_split (alternate api).  Returns nonzero if the
   move was split (for split_all == 3 the pieces are left in
   OPERANDS[2..]; otherwise they are emitted here).  */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  /* OPI starts at 2 because ops[0..1] are reserved for the original
     operand pair in the define_split API; accumulated sub-move pairs
     follow from index 2.  */
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  /* Far-address-space accesses on the 16-bit chips must also be
     split.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  Pushes
     go from the most significant part down, so the pieces end up in
     memory in the right order.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Order the sub-moves so no destination clobbers a source
	 still to be read.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3561 
3562 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3563    the like.  For the R8C they expect one of the addresses to be in
3564    R1L:An so we need to arrange for that.  Otherwise, it's just a
3565    matter of picking out the operands we want and emitting the right
3566    pattern for them.  All these expanders, which correspond to
3567    patterns in blkmov.md, must return nonzero if they expand the insn,
3568    or zero if they should FAIL.  */
3569 
/* This is a memset() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   the count (HI), and $2 the value (QI).  Returns 1 (the pattern
   never FAILs).  */
int
m32c_expand_setmemhi(rtx *operands)
{
  rtx desta, count, val;
  rtx desto, counto;

  desta = XEXP (operands[0], 0);
  count = operands[1];
  val = operands[2];

  /* Pseudos that receive the opcode's output address and count.  */
  desto = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  /* This looks like an arbitrary restriction, but this is by far the
     most common case.  For counts 8..14 this actually results in
     smaller code with no speed penalty because the half-sized
     constant can be loaded with a shorter opcode.  */
  if (GET_CODE (count) == CONST_INT
      && GET_CODE (val) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1)
      && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
    {
      /* Duplicate the byte into both halves of a word so we can
	 store word-at-a-time with half the count.  */
      unsigned v = INTVAL (val) & 0xff;
      v = v | (v << 8);
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      val = copy_to_mode_reg (HImode, GEN_INT (v));
      if (TARGET_A16)
	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
      else
	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
      return 1;
    }

  /* This is the generalized memset() case: byte-wise store with a
     runtime count and value.  */
  if (GET_CODE (val) != REG
      || REGNO (val) < FIRST_PSEUDO_REGISTER)
    val = copy_to_mode_reg (QImode, val);

  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
  else
    emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));

  return 1;
}
3628 
/* This is a memcpy() opcode.  All operands are implied, so we need to
   arrange for them to be in the right registers.  The opcode wants
   addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
   is the source (MEM:BLK), and $2 the count (HI).  Returns 1 (the
   pattern never FAILs).  */
int
m32c_expand_movmemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  desta = XEXP (operands[0], 0);
  srca = XEXP (operands[1], 0);
  count = operands[2];

  /* Pseudos that receive the opcode's output addresses and count.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      /* Even constant count: copy word-at-a-time with half the
	 count.  */
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized memcpy() case: byte-wise copy with a
     runtime count.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
3680 
3681 /* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
3682    the copy, which should point to the NUL at the end of the string,
3683    $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3684    Since our opcode leaves the destination pointing *after* the NUL,
3685    we must emit an adjustment.  */
3686 int
3687 m32c_expand_movstr(rtx *operands)
3688 {
3689   rtx desta, srca;
3690   rtx desto, srco;
3691 
3692   desta = XEXP (operands[1], 0);
3693   srca = XEXP (operands[2], 0);
3694 
3695   desto = gen_reg_rtx (Pmode);
3696   srco = gen_reg_rtx (Pmode);
3697 
3698   if (GET_CODE (desta) != REG
3699       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3700     desta = copy_to_mode_reg (Pmode, desta);
3701 
3702   if (GET_CODE (srca) != REG
3703       || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3704     srca = copy_to_mode_reg (Pmode, srca);
3705 
3706   emit_insn (gen_movstr_op (desto, srco, desta, srca));
3707   /* desto ends up being a1, which allows this type of add through MOVA.  */
3708   emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3709 
3710   return 1;
3711 }
3712 
3713 /* This is a strcmp() opcode.  $0 is the destination (HI) which holds
3714    <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3715    $2 is the other (MEM:BLK).  We must do the comparison, and then
3716    convert the flags to a signed integer result.  */
3717 int
3718 m32c_expand_cmpstr(rtx *operands)
3719 {
3720   rtx src1a, src2a;
3721 
3722   src1a = XEXP (operands[1], 0);
3723   src2a = XEXP (operands[2], 0);
3724 
3725   if (GET_CODE (src1a) != REG
3726       || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3727     src1a = copy_to_mode_reg (Pmode, src1a);
3728 
3729   if (GET_CODE (src2a) != REG
3730       || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3731     src2a = copy_to_mode_reg (Pmode, src2a);
3732 
3733   emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3734   emit_insn (gen_cond_to_int (operands[0]));
3735 
3736   return 1;
3737 }
3738 
3739 
/* Type of the gen_* functions for the two-operand shift insns:
   (dest, src, count).  */
typedef rtx (*shift_gen_func)(rtx, rtx, rtx);

/* Return the gen_* function for a shift of kind CODE (ASHIFT,
   ASHIFTRT or LSHIFTRT) in MODE.  SImode needs different patterns
   for the 16-bit and 24-bit address variants.  Aborts on any other
   mode/code combination.  */
static shift_gen_func
shift_gen_func_for (int mode, int code)
{
#define GFF(m,c,f) if (mode == m && code == c) return f
  GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
  GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
  GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
  GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
  GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
  GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
  GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
  GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
  GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
  GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
  GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
  GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
#undef GFF
  gcc_unreachable ();
}
3761 
/* The m32c only has one shift, but it takes a signed count.  GCC
   doesn't want this, so we fake it by negating any shift count when
   we're pretending to shift the other way.  Also, the shift count is
   limited to -8..8.  It's slightly better to use two shifts for 9..15
   than to load the count into r1h, so we do that too.

   SCALE is multiplied into the count; it is negative when the
   pattern's shift direction is opposite the hardware's (the count
   must be negated).  Returns 1 if the shift was fully emitted here,
   0 if the caller's pattern should emit it using the (possibly
   rewritten) operands.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Maximum count a single shift insn can take for this mode and
	 chip.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      /* Emit max-width shifts until the residual count fits in
	 -maxc..maxc, then one final shift for the remainder.  */
      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count: get it into a QImode register, negated when the
     direction is reversed.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this. */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

		cmp.b	r1h,-16
		jge.b	1f
		shl.l	-16,dest
		add.b	r1h,16
	1f:	cmp.b	r1h,16
		jle.b	1f
		shl.l	16,dest
		sub.b	r1h,16
	1f:	shl.l	r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx label, tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  operands[2] = temp;
  return 0;
}
3871 
3872 /* The m32c has a limited range of operations that work on PSImode
3873    values; we have to expand to SI, do the math, and truncate back to
3874    PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
3875    those cases.  */
3876 void
3877 m32c_expand_neg_mulpsi3 (rtx * operands)
3878 {
3879   /* operands: a = b * i */
3880   rtx temp1; /* b as SI */
3881   rtx scale /* i as SI */;
3882   rtx temp2; /* a*b as SI */
3883 
3884   temp1 = gen_reg_rtx (SImode);
3885   temp2 = gen_reg_rtx (SImode);
3886   if (GET_CODE (operands[2]) != CONST_INT)
3887     {
3888       scale = gen_reg_rtx (SImode);
3889       emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3890     }
3891   else
3892     scale = copy_to_mode_reg (SImode, operands[2]);
3893 
3894   emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3895   temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3896   emit_insn (gen_truncsipsi2 (operands[0], temp2));
3897 }
3898 
3899 /* Pattern Output Functions */
3900 
3901 int
3902 m32c_expand_movcc (rtx *operands)
3903 {
3904   rtx rel = operands[1];
3905 
3906   if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3907     return 1;
3908   if (GET_CODE (operands[2]) != CONST_INT
3909       || GET_CODE (operands[3]) != CONST_INT)
3910     return 1;
3911   if (GET_CODE (rel) == NE)
3912     {
3913       rtx tmp = operands[2];
3914       operands[2] = operands[3];
3915       operands[3] = tmp;
3916       rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3917     }
3918 
3919   emit_move_insn (operands[0],
3920 		  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3921 					rel,
3922 					operands[2],
3923 					operands[3]));
3924   return 0;
3925 }
3926 
/* Used for the "insv" pattern.  Return nonzero to fail, else done.
   Only single-bit inserts of a constant 0/1 are handled; they are
   emitted as an AND (clearing) or IOR (setting) with a one-bit
   mask, which the bset/bclr patterns then match.  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  /* Only single-bit fields.  */
  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  mask = 1 << INTVAL (operands[2]);

  /* Strip a zero-offset subreg down to its QI/HI hard value.  */
  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
	op0 = sub;
    }

  /* Read-modify-write through a temporary when we can, so the AND/IOR
     source is distinct from the destination; volatile MEMs must be
     accessed in place.  */
  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
							XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      /* NOTE(review): NEW_MEM is computed but never used after this
	 point -- it looks like the intent was to narrow the access to
	 the high byte (op0 = new_mem) alongside shifting the mask,
	 but only the mask is adjusted.  Confirm against the bset/bclr
	 patterns before changing.  */
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
	mask ^= 0xffff;
      else
	mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
	mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
	mask -= 0x100;
    }

  /* Select and/ior x qi/hi x 16/24-bit variant of the insn.  */
  switch (  (INTVAL (operands[3]) ? 4 : 0)
	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
	  + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}
4017 
4018 const char *
4019 m32c_scc_pattern(rtx *operands, RTX_CODE code)
4020 {
4021   static char buf[30];
4022   if (GET_CODE (operands[0]) == REG
4023       && REGNO (operands[0]) == R0_REGNO)
4024     {
4025       if (code == EQ)
4026 	return "stzx\t#1,#0,r0l";
4027       if (code == NE)
4028 	return "stzx\t#0,#1,r0l";
4029     }
4030   sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
4031   return buf;
4032 }
4033 
4034 /* Encode symbol attributes of a SYMBOL_REF into its
4035    SYMBOL_REF_FLAGS. */
4036 static void
4037 m32c_encode_section_info (tree decl, rtx rtl, int first)
4038 {
4039   int extra_flags = 0;
4040 
4041   default_encode_section_info (decl, rtl, first);
4042   if (TREE_CODE (decl) == FUNCTION_DECL
4043       && m32c_special_page_vector_p (decl))
4044 
4045     extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
4046 
4047   if (extra_flags)
4048     SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4049 }
4050 
/* Returns TRUE if the current function is a leaf, and thus we can
   determine which registers an interrupt function really needs to
   save.  The logic below is mostly about finding the insn sequence
   that's the function, versus any sequence that might be open for the
   current insn.  */
static int
m32c_leaf_function_p (void)
{
  rtx_insn *saved_first, *saved_last;
  struct sequence_stack *seq;
  int rv;

  /* leaf_function_p examines whatever insn list is current, so
     temporarily swap in the function-level list.  */
  saved_first = crtl->emit.x_first_insn;
  saved_last = crtl->emit.x_last_insn;
  /* Walk to the bottom of the sequence stack; that entry holds the
     function's own insn stream.  */
  for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
    ;
  if (seq)
    {
      crtl->emit.x_first_insn = seq->first;
      crtl->emit.x_last_insn = seq->last;
    }

  rv = leaf_function_p ();

  /* Restore whatever sequence was current on entry.  */
  crtl->emit.x_first_insn = saved_first;
  crtl->emit.x_last_insn = saved_last;
  return rv;
}
4079 
/* Returns TRUE if the current function needs to use the ENTER/EXIT
   opcodes.  If the function doesn't need the frame base or stack
   pointer, it can use the simpler RTS opcode.  */
static bool
m32c_function_needs_enter (void)
{
  rtx_insn *insn;
  struct sequence_stack *seq;
  rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
  rtx fb = gen_rtx_REG (Pmode, FB_REGNO);

  /* Find the function-level insn stream: walk to the bottom of the
     sequence stack, picking up that entry's first insn.  The empty
     loop body is intentional.  */
  insn = get_insns ();
  for (seq = crtl->emit.sequence_stack;
       seq;
       insn = seq->first, seq = seq->next);

  /* ENTER/EXIT are needed iff any insn mentions the stack pointer or
     the frame base register.  */
  while (insn)
    {
      if (reg_mentioned_p (sp, insn))
	return true;
      if (reg_mentioned_p (fb, insn))
	return true;
      insn = NEXT_INSN (insn);
    }
  return false;
}
4106 
4107 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4108    frame-related.  Return PAR.
4109 
4110    dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4111    PARALLEL rtx other than the first if they do not have the
4112    FRAME_RELATED flag set on them.  So this function is handy for
4113    marking up 'enter' instructions.  */
4114 static rtx
4115 m32c_all_frame_related (rtx par)
4116 {
4117   int len = XVECLEN (par, 0);
4118   int i;
4119 
4120   for (i = 0; i < len; i++)
4121     F (XVECEXP (par, 0, i));
4122 
4123   return par;
4124 }
4125 
4126 /* Emits the prologue.  See the frame layout comment earlier in this
4127    file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
4128    that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  /* Nonzero if we emit anything beyond a plain ENTER; triggers a
     marker insn at the end for .s-file debugging.  */
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  /* Bytes needed by the PUSHM register-save, without emitting it.  */
  reg_save_size = m32c_pushm_popm (PP_justcount);

  if (interrupt_p (cfun->decl))
    {
      /* Either switch register banks (FSET B) or push the registers
	 the interrupt handler clobbers.  */
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  /* With no frame and no sp/fb references, the cheaper RTS return
     suffices; remember that for the epilogue.  */
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  /* ENTER can only reserve up to 254 bytes; any excess is subtracted
     from sp explicitly below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      /* Stack pointer is HImode on A16, PSImode on A24.  */
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  /* Now actually emit the register saves.  */
  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4191 
4192 /* Likewise, for the epilogue.  The only exception is that, for
4193    interrupts, we must manually unwind the frame as the REIT opcode
4194    doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  /* Restore the registers saved by the prologue's PUSHM.  */
  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* Unwind the frame by hand: $sp <- $fb via $a0, then pop
	     the saved frame base.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      /* Registers saved with PUSHM (rather than by a bank switch)
	 must be restored before returning.  */
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
         generated only for M32C/M32CM targets (generate the REIT
         instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
        {
          /* Check if fast_attribute is set for M32C or M32CM.  */
          if (TARGET_A24)
            {
              emit_jump_insn (gen_epilogue_freit ());
            }
          /* If fast_interrupt attribute is set for an R8C or M16C
             target, ignore this attribute and generate a REIT
             instruction instead.  */
          else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
        }
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4263 
/* Emit the exception-handling epilogue.  RET_ADDR is the address to
   return to; the stack adjustment comes from cfun->machine.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /*  emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}
4274 
4275 /* Indicate which flags must be properly set for a given conditional.  */
4276 static int
4277 flags_needed_for_conditional (rtx cond)
4278 {
4279   switch (GET_CODE (cond))
4280     {
4281     case LE:
4282     case GT:
4283       return FLAGS_OSZ;
4284     case LEU:
4285     case GTU:
4286       return FLAGS_ZC;
4287     case LT:
4288     case GE:
4289       return FLAGS_OS;
4290     case LTU:
4291     case GEU:
4292       return FLAGS_C;
4293     case EQ:
4294     case NE:
4295       return FLAGS_Z;
4296     default:
4297       return FLAGS_N;
4298     }
4299 }
4300 
4301 #define DEBUG_CMP 0
4302 
4303 /* Returns true if a compare insn is redundant because it would only
4304    set flags that are already set correctly.  */
4305 static bool
4306 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4307 {
4308   int flags_needed;
4309   int pflags;
4310   rtx_insn *prev;
4311   rtx pp, next;
4312   rtx op0, op1;
4313 #if DEBUG_CMP
4314   int prev_icode, i;
4315 #endif
4316 
4317   op0 = operands[0];
4318   op1 = operands[1];
4319 
4320 #if DEBUG_CMP
4321   fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4322   debug_rtx(cmp);
4323   for (i=0; i<2; i++)
4324     {
4325       fprintf(stderr, "operands[%d] = ", i);
4326       debug_rtx(operands[i]);
4327     }
4328 #endif
4329 
4330   next = next_nonnote_insn (cmp);
4331   if (!next || !INSN_P (next))
4332     {
4333 #if DEBUG_CMP
4334       fprintf(stderr, "compare not followed by insn\n");
4335       debug_rtx(next);
4336 #endif
4337       return false;
4338     }
4339   if (GET_CODE (PATTERN (next)) == SET
4340       && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4341     {
4342       next = XEXP (XEXP (PATTERN (next), 1), 0);
4343     }
4344   else if (GET_CODE (PATTERN (next)) == SET)
4345     {
4346       /* If this is a conditional, flags_needed will be something
4347 	 other than FLAGS_N, which we test below.  */
4348       next = XEXP (PATTERN (next), 1);
4349     }
4350   else
4351     {
4352 #if DEBUG_CMP
4353       fprintf(stderr, "compare not followed by conditional\n");
4354       debug_rtx(next);
4355 #endif
4356       return false;
4357     }
4358 #if DEBUG_CMP
4359   fprintf(stderr, "conditional is: ");
4360   debug_rtx(next);
4361 #endif
4362 
4363   flags_needed = flags_needed_for_conditional (next);
4364   if (flags_needed == FLAGS_N)
4365     {
4366 #if DEBUG_CMP
4367       fprintf(stderr, "compare not followed by conditional\n");
4368       debug_rtx(next);
4369 #endif
4370       return false;
4371     }
4372 
4373   /* Compare doesn't set overflow and carry the same way that
4374      arithmetic instructions do, so we can't replace those.  */
4375   if (flags_needed & FLAGS_OC)
4376     return false;
4377 
4378   prev = cmp;
4379   do {
4380     prev = prev_nonnote_insn (prev);
4381     if (!prev)
4382       {
4383 #if DEBUG_CMP
4384 	fprintf(stderr, "No previous insn.\n");
4385 #endif
4386 	return false;
4387       }
4388     if (!INSN_P (prev))
4389       {
4390 #if DEBUG_CMP
4391 	fprintf(stderr, "Previous insn is a non-insn.\n");
4392 #endif
4393 	return false;
4394       }
4395     pp = PATTERN (prev);
4396     if (GET_CODE (pp) != SET)
4397       {
4398 #if DEBUG_CMP
4399 	fprintf(stderr, "Previous insn is not a SET.\n");
4400 #endif
4401 	return false;
4402       }
4403     pflags = get_attr_flags (prev);
4404 
4405     /* Looking up attributes of previous insns corrupted the recog
4406        tables.  */
4407     INSN_UID (cmp) = -1;
4408     recog (PATTERN (cmp), cmp, 0);
4409 
4410     if (pflags == FLAGS_N
4411 	&& reg_mentioned_p (op0, pp))
4412       {
4413 #if DEBUG_CMP
4414 	fprintf(stderr, "intermediate non-flags insn uses op:\n");
4415 	debug_rtx(prev);
4416 #endif
4417 	return false;
4418       }
4419 
4420     /* Check for comparisons against memory - between volatiles and
4421        aliases, we just can't risk this one.  */
4422     if (GET_CODE (operands[0]) == MEM
4423 	|| GET_CODE (operands[0]) == MEM)
4424       {
4425 #if DEBUG_CMP
4426 	fprintf(stderr, "comparisons with memory:\n");
4427 	debug_rtx(prev);
4428 #endif
4429 	return false;
4430       }
4431 
4432     /* Check for PREV changing a register that's used to compute a
4433        value in CMP, even if it doesn't otherwise change flags.  */
4434     if (GET_CODE (operands[0]) == REG
4435 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4436       {
4437 #if DEBUG_CMP
4438 	fprintf(stderr, "sub-value affected, op0:\n");
4439 	debug_rtx(prev);
4440 #endif
4441 	return false;
4442       }
4443     if (GET_CODE (operands[1]) == REG
4444 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4445       {
4446 #if DEBUG_CMP
4447 	fprintf(stderr, "sub-value affected, op1:\n");
4448 	debug_rtx(prev);
4449 #endif
4450 	return false;
4451       }
4452 
4453   } while (pflags == FLAGS_N);
4454 #if DEBUG_CMP
4455   fprintf(stderr, "previous flag-setting insn:\n");
4456   debug_rtx(prev);
4457   debug_rtx(pp);
4458 #endif
4459 
4460   if (GET_CODE (pp) == SET
4461       && GET_CODE (XEXP (pp, 0)) == REG
4462       && REGNO (XEXP (pp, 0)) == FLG_REGNO
4463       && GET_CODE (XEXP (pp, 1)) == COMPARE)
4464     {
4465       /* Adjacent cbranches must have the same operands to be
4466 	 redundant.  */
4467       rtx pop0 = XEXP (XEXP (pp, 1), 0);
4468       rtx pop1 = XEXP (XEXP (pp, 1), 1);
4469 #if DEBUG_CMP
4470       fprintf(stderr, "adjacent cbranches\n");
4471       debug_rtx(pop0);
4472       debug_rtx(pop1);
4473 #endif
4474       if (rtx_equal_p (op0, pop0)
4475 	  && rtx_equal_p (op1, pop1))
4476 	return true;
4477 #if DEBUG_CMP
4478       fprintf(stderr, "prev cmp not same\n");
4479 #endif
4480       return false;
4481     }
4482 
4483   /* Else the previous insn must be a SET, with either the source or
4484      dest equal to operands[0], and operands[1] must be zero.  */
4485 
4486   if (!rtx_equal_p (op1, const0_rtx))
4487     {
4488 #if DEBUG_CMP
4489       fprintf(stderr, "operands[1] not const0_rtx\n");
4490 #endif
4491       return false;
4492     }
4493   if (GET_CODE (pp) != SET)
4494     {
4495 #if DEBUG_CMP
4496       fprintf (stderr, "pp not set\n");
4497 #endif
4498       return false;
4499     }
4500   if (!rtx_equal_p (op0, SET_SRC (pp))
4501       && !rtx_equal_p (op0, SET_DEST (pp)))
4502     {
4503 #if DEBUG_CMP
4504       fprintf(stderr, "operands[0] not found in set\n");
4505 #endif
4506       return false;
4507     }
4508 
4509 #if DEBUG_CMP
4510   fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4511 #endif
4512   if ((pflags & flags_needed) == flags_needed)
4513     return true;
4514 
4515   return false;
4516 }
4517 
4518 /* Return the pattern for a compare.  This will be commented out if
4519    the compare is redundant, else a normal pattern is returned.  Thus,
4520    the assembler output says where the compare would have been.  */
char *
m32c_output_compare (rtx_insn *insn, rtx *operands)
{
  /* The leading ';' comments the insn out; index 5 holds the size
     suffix and is patched below.  */
  static char templ[] = ";cmp.b\t%1,%0";
  /*                             ^ 5  */

  /* Mode size in bytes (1/2/4) indexes into " bwll" to pick the
     b/w/l suffix.  */
  templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
  if (m32c_compare_redundant (insn, operands))
    {
#if DEBUG_CMP
      fprintf(stderr, "cbranch: cmp not needed\n");
#endif
      /* Redundant: emit the commented-out form so the .s file still
	 shows where the compare would have been.  */
      return templ;
    }

#if DEBUG_CMP
  fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
#endif
  /* Skip the leading ';' so the compare is actually assembled.  */
  return templ + 1;
}
4541 
4542 #undef TARGET_ENCODE_SECTION_INFO
4543 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4544 
4545 /* If the frame pointer isn't used, we detect it manually.  But the
4546    stack pointer doesn't have as flexible addressing as the frame
4547    pointer, so we always assume we have it.  */
4548 
4549 #undef TARGET_FRAME_POINTER_REQUIRED
4550 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4551 
/* The Global `targetm' Variable. */

/* Instantiate the target hook vector; the TARGET_* macro overrides
   defined throughout this file are collected here.  */
struct gcc_target targetm = TARGET_INITIALIZER;
4555 
4556 #include "gt-m32c.h"
4557