xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/config/m32c/m32c.c (revision 404ee5b9334f618040b6cdef96a0ff35a6fc4636)
1 /* Target Code for R8C/M16C/M32C
2    Copyright (C) 2005-2017 Free Software Foundation, Inc.
3    Contributed by Red Hat.
4 
5    This file is part of GCC.
6 
7    GCC is free software; you can redistribute it and/or modify it
8    under the terms of the GNU General Public License as published
9    by the Free Software Foundation; either version 3, or (at your
10    option) any later version.
11 
12    GCC is distributed in the hope that it will be useful, but WITHOUT
13    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15    License for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with GCC; see the file COPYING3.  If not see
19    <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "df.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "optabs.h"
32 #include "regs.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "output.h"
37 #include "insn-attr.h"
38 #include "flags.h"
39 #include "reload.h"
40 #include "stor-layout.h"
41 #include "varasm.h"
42 #include "calls.h"
43 #include "explow.h"
44 #include "expr.h"
45 #include "tm-constrs.h"
46 #include "builtins.h"
47 
48 /* This file should be included last.  */
49 #include "target-def.h"
50 
51 /* Prototypes */
52 
/* Used by m32c_pushm_popm to select what a single call does: emit a
   PUSHM insn, emit a POPM insn, or just count how many bytes the
   push/pop would move without emitting anything.  */
typedef enum
{
  PP_pushm,
  PP_popm,
  PP_justcount
} Push_Pop_Type;
60 
61 static bool m32c_function_needs_enter (void);
62 static tree interrupt_handler (tree *, tree, tree, int, bool *);
63 static tree function_vector_handler (tree *, tree, tree, int, bool *);
64 static int interrupt_p (tree node);
65 static int bank_switch_p (tree node);
66 static int fast_interrupt_p (tree node);
67 static int interrupt_p (tree node);
68 static bool m32c_asm_integer (rtx, unsigned int, int);
69 static int m32c_comp_type_attributes (const_tree, const_tree);
70 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
71 static struct machine_function *m32c_init_machine_status (void);
72 static void m32c_insert_attributes (tree, tree *);
73 static bool m32c_legitimate_address_p (machine_mode, rtx, bool);
74 static bool m32c_addr_space_legitimate_address_p (machine_mode, rtx, bool, addr_space_t);
75 static rtx m32c_function_arg (cumulative_args_t, machine_mode,
76 			      const_tree, bool);
77 static bool m32c_pass_by_reference (cumulative_args_t, machine_mode,
78 				    const_tree, bool);
79 static void m32c_function_arg_advance (cumulative_args_t, machine_mode,
80 				       const_tree, bool);
81 static unsigned int m32c_function_arg_boundary (machine_mode, const_tree);
82 static int m32c_pushm_popm (Push_Pop_Type);
83 static bool m32c_strict_argument_naming (cumulative_args_t);
84 static rtx m32c_struct_value_rtx (tree, int);
85 static rtx m32c_subreg (machine_mode, rtx, machine_mode, int);
86 static int need_to_save (int);
87 static rtx m32c_function_value (const_tree, const_tree, bool);
88 static rtx m32c_libcall_value (machine_mode, const_rtx);
89 
90 /* Returns true if an address is specified, else false.  */
91 static bool m32c_get_pragma_address (const char *varname, unsigned *addr);
92 
/* Symbol flag marking functions that must be called through the
   function vector (see the function_vector attribute handling).  */
#define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)

/* Shorthand for string equality.  */
#define streq(a,b) (strcmp ((a), (b)) == 0)

/* Internal support routines */

/* Debugging statements are tagged with DEBUG0 only so that they can
   be easily enabled individually, by replacing the '0' with '1' as
   needed.  */
#define DEBUG0 0
#define DEBUG1 1

#if DEBUG0
#include "print-tree.h"
/* This is needed by some of the commented-out debug statements
   below.  */
static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
#endif
/* First (only) word of each register class's hard-reg mask; used for
   cheap bitmask tests (e.g. m32c_secondary_reload_class).  */
static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;

/* These are all to support encode_pattern().  */
static char pattern[30], *patternp;
static GTY(()) rtx patternr[30];
#define RTX_IS(x) (streq (pattern, x))

/* Some macros to simplify the logic throughout this file.  */
#define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
#define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))

#define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
#define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
124 
125 static int
126 far_addr_space_p (rtx x)
127 {
128   if (GET_CODE (x) != MEM)
129     return 0;
130 #if DEBUG0
131   fprintf(stderr, "\033[35mfar_addr_space: "); debug_rtx(x);
132   fprintf(stderr, " = %d\033[0m\n", MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR);
133 #endif
134   return MEM_ADDR_SPACE (x) == ADDR_SPACE_FAR;
135 }
136 
137 /* We do most RTX matching by converting the RTX into a string, and
138    using string compares.  This vastly simplifies the logic in many of
139    the functions in this file.
140 
141    On exit, pattern[] has the encoded string (use RTX_IS("...") to
142    compare it) and patternr[] has pointers to the nodes in the RTX
143    corresponding to each character in the encoded string.  The latter
144    is mostly used by print_operand().
145 
146    Unrecognized patterns have '?' in them; this shows up when the
147    assembler complains about syntax errors.
148 */
149 
150 static void
151 encode_pattern_1 (rtx x)
152 {
153   int i;
154 
155   if (patternp == pattern + sizeof (pattern) - 2)
156     {
157       patternp[-1] = '?';
158       return;
159     }
160 
161   patternr[patternp - pattern] = x;
162 
163   switch (GET_CODE (x))
164     {
165     case REG:
166       *patternp++ = 'r';
167       break;
168     case SUBREG:
169       if (GET_MODE_SIZE (GET_MODE (x)) !=
170 	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
171 	*patternp++ = 'S';
172       if (GET_MODE (x) == PSImode
173 	  && GET_CODE (XEXP (x, 0)) == REG)
174 	*patternp++ = 'S';
175       encode_pattern_1 (XEXP (x, 0));
176       break;
177     case MEM:
178       *patternp++ = 'm';
179       /* FALLTHRU */
180     case CONST:
181       encode_pattern_1 (XEXP (x, 0));
182       break;
183     case SIGN_EXTEND:
184       *patternp++ = '^';
185       *patternp++ = 'S';
186       encode_pattern_1 (XEXP (x, 0));
187       break;
188     case ZERO_EXTEND:
189       *patternp++ = '^';
190       *patternp++ = 'Z';
191       encode_pattern_1 (XEXP (x, 0));
192       break;
193     case PLUS:
194       *patternp++ = '+';
195       encode_pattern_1 (XEXP (x, 0));
196       encode_pattern_1 (XEXP (x, 1));
197       break;
198     case PRE_DEC:
199       *patternp++ = '>';
200       encode_pattern_1 (XEXP (x, 0));
201       break;
202     case POST_INC:
203       *patternp++ = '<';
204       encode_pattern_1 (XEXP (x, 0));
205       break;
206     case LO_SUM:
207       *patternp++ = 'L';
208       encode_pattern_1 (XEXP (x, 0));
209       encode_pattern_1 (XEXP (x, 1));
210       break;
211     case HIGH:
212       *patternp++ = 'H';
213       encode_pattern_1 (XEXP (x, 0));
214       break;
215     case SYMBOL_REF:
216       *patternp++ = 's';
217       break;
218     case LABEL_REF:
219       *patternp++ = 'l';
220       break;
221     case CODE_LABEL:
222       *patternp++ = 'c';
223       break;
224     case CONST_INT:
225     case CONST_DOUBLE:
226       *patternp++ = 'i';
227       break;
228     case UNSPEC:
229       *patternp++ = 'u';
230       *patternp++ = '0' + XCINT (x, 1, UNSPEC);
231       for (i = 0; i < XVECLEN (x, 0); i++)
232 	encode_pattern_1 (XVECEXP (x, 0, i));
233       break;
234     case USE:
235       *patternp++ = 'U';
236       break;
237     case PARALLEL:
238       *patternp++ = '|';
239       for (i = 0; i < XVECLEN (x, 0); i++)
240 	encode_pattern_1 (XVECEXP (x, 0, i));
241       break;
242     case EXPR_LIST:
243       *patternp++ = 'E';
244       encode_pattern_1 (XEXP (x, 0));
245       if (XEXP (x, 1))
246 	encode_pattern_1 (XEXP (x, 1));
247       break;
248     default:
249       *patternp++ = '?';
250 #if DEBUG0
251       fprintf (stderr, "can't encode pattern %s\n",
252 	       GET_RTX_NAME (GET_CODE (x)));
253       debug_rtx (x);
254 #endif
255       break;
256     }
257 }
258 
259 static void
260 encode_pattern (rtx x)
261 {
262   patternp = pattern;
263   encode_pattern_1 (x);
264   *patternp = 0;
265 }
266 
267 /* Since register names indicate the mode they're used in, we need a
268    way to determine which name to refer to the register with.  Called
269    by print_operand().  */
270 
271 static const char *
272 reg_name_with_mode (int regno, machine_mode mode)
273 {
274   int mlen = GET_MODE_SIZE (mode);
275   if (regno == R0_REGNO && mlen == 1)
276     return "r0l";
277   if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
278     return "r2r0";
279   if (regno == R0_REGNO && mlen == 6)
280     return "r2r1r0";
281   if (regno == R0_REGNO && mlen == 8)
282     return "r3r1r2r0";
283   if (regno == R1_REGNO && mlen == 1)
284     return "r1l";
285   if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
286     return "r3r1";
287   if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
288     return "a1a0";
289   return reg_names[regno];
290 }
291 
292 /* How many bytes a register uses on stack when it's pushed.  We need
293    to know this because the push opcode needs to explicitly indicate
294    the size of the register, even though the name of the register
295    already tells it that.  Used by m32c_output_reg_{push,pop}, which
296    is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */
297 
298 static int
299 reg_push_size (int regno)
300 {
301   switch (regno)
302     {
303     case R0_REGNO:
304     case R1_REGNO:
305       return 2;
306     case R2_REGNO:
307     case R3_REGNO:
308     case FLG_REGNO:
309       return 2;
310     case A0_REGNO:
311     case A1_REGNO:
312     case SB_REGNO:
313     case FB_REGNO:
314     case SP_REGNO:
315       if (TARGET_A16)
316 	return 2;
317       else
318 	return 3;
319     default:
320       gcc_unreachable ();
321     }
322 }
323 
324 /* Given two register classes, find the largest intersection between
325    them.  If there is no intersection, return RETURNED_IF_EMPTY
326    instead.  */
327 static reg_class_t
328 reduce_class (reg_class_t original_class, reg_class_t limiting_class,
329 	      reg_class_t returned_if_empty)
330 {
331   HARD_REG_SET cc;
332   int i;
333   reg_class_t best = NO_REGS;
334   unsigned int best_size = 0;
335 
336   if (original_class == limiting_class)
337     return original_class;
338 
339   cc = reg_class_contents[original_class];
340   AND_HARD_REG_SET (cc, reg_class_contents[limiting_class]);
341 
342   for (i = 0; i < LIM_REG_CLASSES; i++)
343     {
344       if (hard_reg_set_subset_p (reg_class_contents[i], cc))
345 	if (best_size < reg_class_size[i])
346 	  {
347 	    best = (reg_class_t) i;
348 	    best_size = reg_class_size[i];
349 	  }
350 
351     }
352   if (best == NO_REGS)
353     return returned_if_empty;
354   return best;
355 }
356 
357 /* Used by m32c_register_move_cost to determine if a move is
358    impossibly expensive.  */
359 static bool
360 class_can_hold_mode (reg_class_t rclass, machine_mode mode)
361 {
362   /* Cache the results:  0=untested  1=no  2=yes */
363   static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
364 
365   if (results[(int) rclass][mode] == 0)
366     {
367       int r;
368       results[rclass][mode] = 1;
369       for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
370 	if (in_hard_reg_set_p (reg_class_contents[(int) rclass], mode, r)
371 	    && HARD_REGNO_MODE_OK (r, mode))
372 	  {
373 	    results[rclass][mode] = 2;
374 	    break;
375 	  }
376     }
377 
378 #if DEBUG0
379   fprintf (stderr, "class %s can hold %s? %s\n",
380 	   class_names[(int) rclass], mode_name[mode],
381 	   (results[rclass][mode] == 2) ? "yes" : "no");
382 #endif
383   return results[(int) rclass][mode] == 2;
384 }
385 
386 /* Run-time Target Specification.  */
387 
388 /* Memregs are memory locations that gcc treats like general
389    registers, as there are a limited number of true registers and the
390    m32c families can use memory in most places that registers can be
391    used.
392 
393    However, since memory accesses are more expensive than registers,
394    we allow the user to limit the number of memregs available, in
395    order to try to persuade gcc to try harder to use real registers.
396 
397    Memregs are provided by lib1funcs.S.
398 */
399 
/* NOTE(review): presumably cleared elsewhere once memregs have been
   used, so a later option change can't invalidate emitted code —
   confirm against the pragma/option handlers.  */
int ok_to_change_target_memregs = TRUE;
401 
402 /* Implements TARGET_OPTION_OVERRIDE.  */
403 
404 #undef TARGET_OPTION_OVERRIDE
405 #define TARGET_OPTION_OVERRIDE m32c_option_override
406 
407 static void
408 m32c_option_override (void)
409 {
410   /* We limit memregs to 0..16, and provide a default.  */
411   if (global_options_set.x_target_memregs)
412     {
413       if (target_memregs < 0 || target_memregs > 16)
414 	error ("invalid target memregs value '%d'", target_memregs);
415     }
416   else
417     target_memregs = 16;
418 
419   if (TARGET_A24)
420     flag_ivopts = 0;
421 
422   /* This target defaults to strict volatile bitfields.  */
423   if (flag_strict_volatile_bitfields < 0 && abi_version_at_least(2))
424     flag_strict_volatile_bitfields = 1;
425 
426   /* r8c/m16c have no 16-bit indirect call, so thunks are involved.
427      This is always worse than an absolute call.  */
428   if (TARGET_A16)
429     flag_no_function_cse = 1;
430 
431   /* This wants to put insns between compares and their jumps.  */
432   /* FIXME: The right solution is to properly trace the flags register
433      values, but that is too much work for stage 4.  */
434   flag_combine_stack_adjustments = 0;
435 }
436 
437 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
438 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m32c_override_options_after_change
439 
/* Re-applied whenever options change: the 16-bit (A16) families have
   no 16-bit indirect call, so function-address CSE is never a win
   there (see the matching code in m32c_option_override).  */
static void
m32c_override_options_after_change (void)
{
  if (TARGET_A16)
    flag_no_function_cse = 1;
}
446 
447 /* Defining data structures for per-function information */
448 
/* The usual; we set up our machine_function data.  All fields start
   out zeroed.  */
static struct machine_function *
m32c_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}
455 
/* Implements INIT_EXPANDERS.  We just set up to call the above
   function whenever a new function's machine state is needed.  */
void
m32c_init_expanders (void)
{
  init_machine_status = m32c_init_machine_status;
}
463 
464 /* Storage Layout */
465 
466 /* Register Basics */
467 
468 /* Basic Characteristics of Registers */
469 
/* Whether a mode fits in a register is complex enough to warrant a
   table.  Each entry gives the number of hard registers a value of
   the corresponding mode class occupies when stored starting at that
   register; 0 means the mode does not fit there at all (see
   m32c_hard_regno_nregs_1).  Rows follow the hard register numbering,
   in which r2 precedes r1.  */
static struct
{
  char qi_regs;
  char hi_regs;
  char pi_regs;
  char si_regs;
  char di_regs;
} nregs_table[FIRST_PSEUDO_REGISTER] =
{
  { 1, 1, 2, 2, 4 },		/* r0 */
  { 0, 1, 0, 0, 0 },		/* r2 */
  { 1, 1, 2, 2, 0 },		/* r1 */
  { 0, 1, 0, 0, 0 },		/* r3 */
  { 0, 1, 1, 0, 0 },		/* a0 */
  { 0, 1, 1, 0, 0 },		/* a1 */
  { 0, 1, 1, 0, 0 },		/* sb */
  { 0, 1, 1, 0, 0 },		/* fb */
  { 0, 1, 1, 0, 0 },		/* sp */
  { 1, 1, 1, 0, 0 },		/* pc */
  { 0, 0, 0, 0, 0 },		/* fl */
  { 1, 1, 1, 0, 0 },		/* ap */
  { 1, 1, 2, 2, 4 },		/* mem0 */
  { 1, 1, 2, 2, 4 },		/* mem1 */
  { 1, 1, 2, 2, 4 },		/* mem2 */
  { 1, 1, 2, 2, 4 },		/* mem3 */
  { 1, 1, 2, 2, 4 },		/* mem4 */
  { 1, 1, 2, 2, 0 },		/* mem5 */
  { 1, 1, 2, 2, 0 },		/* mem6 */
  { 1, 1, 0, 0, 0 },		/* mem7 */
};
502 
503 /* Implements TARGET_CONDITIONAL_REGISTER_USAGE.  We adjust the number
504    of available memregs, and select which registers need to be preserved
505    across calls based on the chip family.  */
506 
507 #undef TARGET_CONDITIONAL_REGISTER_USAGE
508 #define TARGET_CONDITIONAL_REGISTER_USAGE m32c_conditional_register_usage
509 void
510 m32c_conditional_register_usage (void)
511 {
512   int i;
513 
514   if (0 <= target_memregs && target_memregs <= 16)
515     {
516       /* The command line option is bytes, but our "registers" are
517 	 16-bit words.  */
518       for (i = (target_memregs+1)/2; i < 8; i++)
519 	{
520 	  fixed_regs[MEM0_REGNO + i] = 1;
521 	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
522 	}
523     }
524 
525   /* M32CM and M32C preserve more registers across function calls.  */
526   if (TARGET_A24)
527     {
528       call_used_regs[R1_REGNO] = 0;
529       call_used_regs[R2_REGNO] = 0;
530       call_used_regs[R3_REGNO] = 0;
531       call_used_regs[A0_REGNO] = 0;
532       call_used_regs[A1_REGNO] = 0;
533     }
534 }
535 
536 /* How Values Fit in Registers */
537 
538 /* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
539    different registers are different sizes from each other, *and* may
540    be different sizes in different chip families.  */
/* Worker for m32c_hard_regno_nregs/m32c_hard_regno_ok: number of hard
   registers MODE needs starting at REGNO, or 0 if MODE cannot live
   there.  */
static int
m32c_hard_regno_nregs_1 (int regno, machine_mode mode)
{
  /* The flags register holds CCmode by itself.  */
  if (regno == FLG_REGNO && mode == CCmode)
    return 1;
  /* Pseudos use the standard word-count computation.  */
  if (regno >= FIRST_PSEUDO_REGISTER)
    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);

  /* Memregs are uniform 16-bit words; round the mode size up.  */
  if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
    return (GET_MODE_SIZE (mode) + 1) / 2;

  /* For true hard registers, consult nregs_table.  The order of these
     checks matters: the QI/HI tests must precede the PSI/SI ones.  */
  if (GET_MODE_SIZE (mode) <= 1)
    return nregs_table[regno].qi_regs;
  if (GET_MODE_SIZE (mode) <= 2)
    return nregs_table[regno].hi_regs;
  /* SImode in a0 on A16 parts takes the a1a0 pair (see
     reg_name_with_mode).  */
  if (regno == A0_REGNO && mode == SImode && TARGET_A16)
    return 2;
  if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
    return nregs_table[regno].pi_regs;
  if (GET_MODE_SIZE (mode) <= 4)
    return nregs_table[regno].si_regs;
  if (GET_MODE_SIZE (mode) <= 8)
    return nregs_table[regno].di_regs;
  return 0;
}
566 
567 int
568 m32c_hard_regno_nregs (int regno, machine_mode mode)
569 {
570   int rv = m32c_hard_regno_nregs_1 (regno, mode);
571   return rv ? rv : 1;
572 }
573 
574 /* Implements HARD_REGNO_MODE_OK.  The above function does the work
575    already; just test its return value.  */
576 int
577 m32c_hard_regno_ok (int regno, machine_mode mode)
578 {
579   return m32c_hard_regno_nregs_1 (regno, mode) != 0;
580 }
581 
582 /* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
583    registers are all different sizes.  However, since most modes are
584    bigger than our registers anyway, it's easier to implement this
585    function that way, leaving QImode as the only unique case.  */
586 int
587 m32c_modes_tieable_p (machine_mode m1, machine_mode m2)
588 {
589   if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
590     return 1;
591 
592 #if 0
593   if (m1 == QImode || m2 == QImode)
594     return 0;
595 #endif
596 
597   return 1;
598 }
599 
600 /* Register Classes */
601 
602 /* Implements REGNO_REG_CLASS.  */
603 enum reg_class
604 m32c_regno_reg_class (int regno)
605 {
606   switch (regno)
607     {
608     case R0_REGNO:
609       return R0_REGS;
610     case R1_REGNO:
611       return R1_REGS;
612     case R2_REGNO:
613       return R2_REGS;
614     case R3_REGNO:
615       return R3_REGS;
616     case A0_REGNO:
617       return A0_REGS;
618     case A1_REGNO:
619       return A1_REGS;
620     case SB_REGNO:
621       return SB_REGS;
622     case FB_REGNO:
623       return FB_REGS;
624     case SP_REGNO:
625       return SP_REGS;
626     case FLG_REGNO:
627       return FLG_REGS;
628     default:
629       if (IS_MEM_REGNO (regno))
630 	return MEM_REGS;
631       return ALL_REGS;
632     }
633 }
634 
635 /* Implements REGNO_OK_FOR_BASE_P.  */
636 int
637 m32c_regno_ok_for_base_p (int regno)
638 {
639   if (regno == A0_REGNO
640       || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
641     return 1;
642   return 0;
643 }
644 
645 /* Implements TARGET_PREFERRED_RELOAD_CLASS.  In general, prefer general
646    registers of the appropriate size.  */
647 
648 #undef TARGET_PREFERRED_RELOAD_CLASS
649 #define TARGET_PREFERRED_RELOAD_CLASS m32c_preferred_reload_class
650 
/* Worker for TARGET_PREFERRED_RELOAD_CLASS (and the output variant
   below): narrow RCLASS to registers that can actually hold X's
   mode.  */
static reg_class_t
m32c_preferred_reload_class (rtx x, reg_class_t rclass)
{
  reg_class_t newclass = rclass;

#if DEBUG0
  fprintf (stderr, "\npreferred_reload_class for %s is ",
	   class_names[rclass]);
#endif
  /* No class given: start from the byte registers for QImode, else
     the r0..r3 registers.  */
  if (rclass == NO_REGS)
    rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;

  if (reg_classes_intersect_p (rclass, CR_REGS))
    {
      switch (GET_MODE (x))
	{
	case QImode:
	  newclass = HL_REGS;
	  break;
	default:
	  /*      newclass = HI_REGS; */
	  break;
	}
    }

  /* QI_REGS can't hold anything wider than 2 bytes.  */
  else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
    newclass = SI_REGS;
  else if (GET_MODE_SIZE (GET_MODE (x)) > 4
	   && ! reg_class_subset_p (R03_REGS, rclass))
    newclass = DI_REGS;

  /* Intersect RCLASS with NEWCLASS, keeping RCLASS when the
     intersection contains no class (see reduce_class).  */
  rclass = reduce_class (rclass, newclass, rclass);

  /* QImode values belong in the byte registers r0l/r1l.  */
  if (GET_MODE (x) == QImode)
    rclass = reduce_class (rclass, HL_REGS, rclass);

#if DEBUG0
  fprintf (stderr, "%s\n", class_names[rclass]);
  debug_rtx (x);

  if (GET_CODE (x) == MEM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
    fprintf (stderr, "Glorm!\n");
#endif
  return rclass;
}
698 
699 /* Implements TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */
700 
701 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
702 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS m32c_preferred_output_reload_class
703 
/* Output reloads get exactly the same class narrowing as input
   reloads.  */
static reg_class_t
m32c_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  return m32c_preferred_reload_class (x, rclass);
}
709 
710 /* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
711    address registers for reloads since they're needed for address
712    reloads.  */
713 int
714 m32c_limit_reload_class (machine_mode mode, int rclass)
715 {
716 #if DEBUG0
717   fprintf (stderr, "limit_reload_class for %s: %s ->",
718 	   mode_name[mode], class_names[rclass]);
719 #endif
720 
721   if (mode == QImode)
722     rclass = reduce_class (rclass, HL_REGS, rclass);
723   else if (mode == HImode)
724     rclass = reduce_class (rclass, HI_REGS, rclass);
725   else if (mode == SImode)
726     rclass = reduce_class (rclass, SI_REGS, rclass);
727 
728   if (rclass != A_REGS)
729     rclass = reduce_class (rclass, DI_REGS, rclass);
730 
731 #if DEBUG0
732   fprintf (stderr, " %s\n", class_names[rclass]);
733 #endif
734   return rclass;
735 }
736 
737 /* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
738    r0 or r1, as those are the only real QImode registers.  CR regs get
739    reloaded through appropriately sized general or address
740    registers.  */
/* Implements SECONDARY_RELOAD_CLASS.  QImode have to be reloaded in
   r0 or r1, as those are the only real QImode registers.  CR regs get
   reloaded through appropriately sized general or address
   registers.  */
int
m32c_secondary_reload_class (int rclass, machine_mode mode, rtx x)
{
  /* First word of the class's hard-register mask.  */
  int cc = class_contents[rclass][0];
#if DEBUG0
  fprintf (stderr, "\nsecondary reload class %s %s\n",
	   class_names[rclass], mode_name[mode]);
  debug_rtx (x);
#endif
  /* QImode memory into a class contained in r2/r3: bounce through a
     QI-capable register first.  */
  if (mode == QImode
      && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
    return QI_REGS;
  /* Control registers sb..sp move via general registers on A16 (or
     for HImode), address registers otherwise.  */
  if (reg_classes_intersect_p (rclass, CR_REGS)
      && GET_CODE (x) == REG
      && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
    return (TARGET_A16 || mode == HImode) ? HI_REGS : A_REGS;
  return NO_REGS;
}
759 
760 /* Implements TARGET_CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
761    reloads.  */
762 
763 #undef TARGET_CLASS_LIKELY_SPILLED_P
764 #define TARGET_CLASS_LIKELY_SPILLED_P m32c_class_likely_spilled_p
765 
766 static bool
767 m32c_class_likely_spilled_p (reg_class_t regclass)
768 {
769   if (regclass == A_REGS)
770     return true;
771 
772   return (reg_class_size[(int) regclass] == 1);
773 }
774 
775 /* Implements TARGET_CLASS_MAX_NREGS.  We calculate this according to its
776    documented meaning, to avoid potential inconsistencies with actual
777    class definitions.  */
778 
779 #undef TARGET_CLASS_MAX_NREGS
780 #define TARGET_CLASS_MAX_NREGS m32c_class_max_nregs
781 
782 static unsigned char
783 m32c_class_max_nregs (reg_class_t regclass, machine_mode mode)
784 {
785   int rn;
786   unsigned char max = 0;
787 
788   for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
789     if (TEST_HARD_REG_BIT (reg_class_contents[(int) regclass], rn))
790       {
791 	unsigned char n = m32c_hard_regno_nregs (rn, mode);
792 	if (max < n)
793 	  max = n;
794       }
795   return max;
796 }
797 
798 /* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
799    QI (r0l, r1l) because the chip doesn't support QI ops on other
800    registers (well, it does on a0/a1 but if we let gcc do that, reload
801    suffers).  Otherwise, we allow changes to larger modes.  */
/* Worker for CANNOT_CHANGE_MODE_CLASS (see the comment above):
   nonzero when changing a value in RCLASS from FROM to TO must be
   disallowed.  */
int
m32c_cannot_change_mode_class (machine_mode from,
			       machine_mode to, int rclass)
{
  int rn;
#if DEBUG0
  fprintf (stderr, "cannot change from %s to %s in %s\n",
	   mode_name[from], mode_name[to], class_names[rclass]);
#endif

  /* If the larger mode isn't allowed in any of these registers, we
     can't allow the change.  */
  for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
    if (class_contents[rclass][0] & (1 << rn))
      if (! m32c_hard_regno_ok (rn, to))
	return 1;

  /* Only r0/r1 may change to QImode; 0x1ffa is the hard registers
     other than those two.  */
  if (to == QImode)
    return (class_contents[rclass][0] & 0x1ffa);

  /* 0x0005 is the mask for r0 and r1.  */
  if (class_contents[rclass][0] & 0x0005	/* r0, r1 */
      && GET_MODE_SIZE (from) > 1)
    return 0;
  if (GET_MODE_SIZE (from) > 2)	/* all other regs */
    return 0;

  return 1;
}
830 
/* Helpers for the rest of the file.  */
/* TRUE if the rtx is a REG rtx for the given register.  Note that the
   RTX argument is evaluated more than once.  */
#define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
			   && REGNO (rtx) == regno)
/* TRUE if the rtx is a pseudo - specifically, one we can use as a
   base register in address calculations (hence the "strict"
   argument).  */
#define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
			       && (REGNO (rtx) == AP_REGNO \
				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))

/* TRUE if X is the a0 register or a pseudo; X must already be a REG.  */
#define A0_OR_PSEUDO(x) (IS_REG(x, A0_REGNO) || REGNO (x) >= FIRST_PSEUDO_REGISTER)
843 
/* Implements matching for constraints (see next function too).  'S' is
   for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
   call return values.  The RTX_IS() strings below are the encodings
   produced by encode_pattern(); patternr[] indexes the subexpression
   at the corresponding character position.  */
bool
m32c_matches_constraint_p (rtx value, int constraint)
{
  /* Fill pattern[]/patternr[] for the RTX_IS tests below.  */
  encode_pattern (value);

  switch (constraint) {
  case CONSTRAINT_SF:
    /* Far-space memory: an SImode register address, an extended
       HImode register (possibly indexed) plus constant or symbol, or
       a bare symbol.  */
    return (far_addr_space_p (value)
	    && ((RTX_IS ("mr")
		 && A0_OR_PSEUDO (patternr[1])
		 && GET_MODE (patternr[1]) == SImode)
		|| (RTX_IS ("m+^Sri")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^Srs")
		    && A0_OR_PSEUDO (patternr[4])
		    && GET_MODE (patternr[4]) == HImode)
		|| (RTX_IS ("m+^S+ris")
		    && A0_OR_PSEUDO (patternr[5])
		    && GET_MODE (patternr[5]) == HImode)
		|| RTX_IS ("ms")));
  case CONSTRAINT_Sd:
    {
      /* This is the common "src/dest" address */
      rtx r;
      if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
	return true;
      if (RTX_IS ("ms") || RTX_IS ("m+si"))
	return true;
      if (RTX_IS ("m++rii"))
	{
	  /* Accept only fb with a zero inner displacement.  */
	  if (REGNO (patternr[3]) == FB_REGNO
	      && INTVAL (patternr[4]) == 0)
	    return true;
	}
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
	r = patternr[2];
      else
	return false;
      /* The stack pointer is not a valid base register here.  */
      if (REGNO (r) == SP_REGNO)
	return false;
      return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
    }
  case CONSTRAINT_Sa:
    {
      /* Address-register indirect, with or without offset.  */
      rtx r;
      if (RTX_IS ("mr"))
	r = patternr[1];
      else if (RTX_IS ("m+ri"))
	r = patternr[2];
      else
	return false;
      return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
    }
  case CONSTRAINT_Si:
    /* Constant or symbolic (possibly offset) addresses.  */
    return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
  case CONSTRAINT_Ss:
    /* Stack-pointer relative, with or without offset.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SP_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
  case CONSTRAINT_Sf:
    /* Frame-base relative, with or without offset.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], FB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
  case CONSTRAINT_Sb:
    /* Static-base relative, with or without offset.  */
    return ((RTX_IS ("mr")
	     && (IS_REG (patternr[1], SB_REGNO)))
	    || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
  case CONSTRAINT_Sp:
    /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
    return (RTX_IS ("mi")
	    && !(INTVAL (patternr[1]) & ~0x1fff));
  case CONSTRAINT_S1:
    return r1h_operand (value, QImode);
  case CONSTRAINT_Rpa:
    return GET_CODE (value) == PARALLEL;
  default:
    return false;
  }
}
929 
930 /* STACK AND CALLING */
931 
932 /* Frame Layout */
933 
/* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
   (yes, THREE bytes) onto the stack for the return address, but we
   don't support pointers bigger than 16 bits on those chips.  This
   will likely wreak havoc with exception unwinding.  FIXME.  */
rtx
m32c_return_addr_rtx (int count)
{
  machine_mode mode;
  int offset;
  rtx ra_mem;

  /* Only the current frame's return address is supported.  */
  if (count)
    return NULL_RTX;
  /* we want 2[$fb] */

  if (TARGET_A24)
    {
      /* It's four bytes */
      mode = PSImode;
      offset = 4;
    }
  else
    {
      /* FIXME: it's really 3 bytes */
      mode = HImode;
      offset = 2;
    }

  /* Load the return address out of the frame at FP + offset.  */
  ra_mem =
    gen_rtx_MEM (mode, plus_constant (Pmode, gen_rtx_REG (Pmode, FP_REGNO),
				      offset));
  return copy_to_mode_reg (mode, ra_mem);
}
967 
968 /* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
969 rtx
970 m32c_incoming_return_addr_rtx (void)
971 {
972   /* we want [sp] */
973   return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
974 }
975 
976 /* Exception Handling Support */
977 
978 /* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
979    pointers.  */
980 int
981 m32c_eh_return_data_regno (int n)
982 {
983   switch (n)
984     {
985     case 0:
986       return MEM0_REGNO;
987     case 1:
988       return MEM0_REGNO+4;
989     default:
990       return INVALID_REGNUM;
991     }
992 }
993 
994 /* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
995    m32c_emit_eh_epilogue.  */
996 rtx
997 m32c_eh_return_stackadj_rtx (void)
998 {
999   if (!cfun->machine->eh_stack_adjust)
1000     {
1001       rtx sa;
1002 
1003       sa = gen_rtx_REG (Pmode, R0_REGNO);
1004       cfun->machine->eh_stack_adjust = sa;
1005     }
1006   return cfun->machine->eh_stack_adjust;
1007 }
1008 
1009 /* Registers That Address the Stack Frame */
1010 
1011 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
1012    the original spec called for dwarf numbers to vary with register
1013    width as well, for example, r0l, r0, and r2r0 would each have
1014    different dwarf numbers.  GCC doesn't support this, and we don't do
1015    it, and gdb seems to like it this way anyway.  */
/* Map hard register N to its dwarf number (see the comment above for
   why register width is ignored).  */
unsigned int
m32c_dwarf_frame_regnum (int n)
{
  switch (n)
    {
    case R0_REGNO:
      return 5;
    case R1_REGNO:
      return 6;
    case R2_REGNO:
      return 7;
    case R3_REGNO:
      return 8;
    case A0_REGNO:
      return 9;
    case A1_REGNO:
      return 10;
    case FB_REGNO:
      return 11;
    case SB_REGNO:
      return 19;

    case SP_REGNO:
      return 12;
    case PC_REGNO:
      return 13;
    default:
      /* Anything else has no dwarf number.  */
      return DWARF_FRAME_REGISTERS + 1;
    }
}
1046 
1047 /* The frame looks like this:
1048 
1049    ap -> +------------------------------
1050          | Return address (3 or 4 bytes)
1051 	 | Saved FB (2 or 4 bytes)
1052    fb -> +------------------------------
1053 	 | local vars
1054          | register saves fb
1055 	 |        through r0 as needed
1056    sp -> +------------------------------
1057 */
1058 
/* We use this to wrap all emitted insns in the prologue, marking them
   frame-related so the DWARF CFI machinery describes them.  Returns
   its argument for convenient chaining.  */
static rtx
F (rtx x)
{
  RTX_FRAME_RELATED_P (x) = 1;
  return x;
}
1066 
/* This maps register numbers to the PUSHM/POPM bitfield, and tells us
   how much the stack pointer moves for each, for each cpu family.  */
static struct
{
  int reg1;		/* GCC hard register number.  */
  int bit;		/* Corresponding bit in the pushm/popm mask.  */
  int a16_bytes;	/* Bytes pushed when TARGET_A16 (R8C/M16C).  */
  int a24_bytes;	/* Bytes pushed when TARGET_A24 (M32C).  */
} pushm_info[] =
{
  /* These are in reverse push (nearest-to-sp) order.  */
  { R0_REGNO, 0x80, 2, 2 },
  { R1_REGNO, 0x40, 2, 2 },
  { R2_REGNO, 0x20, 2, 2 },
  { R3_REGNO, 0x10, 2, 2 },
  { A0_REGNO, 0x08, 2, 4 },
  { A1_REGNO, 0x04, 2, 4 },
  { SB_REGNO, 0x02, 2, 4 },
  { FB_REGNO, 0x01, 2, 4 }
};

/* Number of entries in the pushm_info table.  */
#define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1089 
1090 /* Returns TRUE if we need to save/restore the given register.  We
1091    save everything for exception handlers, so that any register can be
1092    unwound.  For interrupt handlers, we save everything if the handler
1093    calls something else (because we don't know what *that* function
1094    might do), but try to be a bit smarter if the handler is a leaf
1095    function.  We always save $a0, though, because we use that in the
1096    epilogue to copy $fb to $sp.  */
1097 static int
1098 need_to_save (int regno)
1099 {
1100   if (fixed_regs[regno])
1101     return 0;
1102   if (crtl->calls_eh_return)
1103     return 1;
1104   if (regno == FP_REGNO)
1105     return 0;
1106   if (cfun->machine->is_interrupt
1107       && (!cfun->machine->is_leaf
1108 	  || (regno == A0_REGNO
1109 	      && m32c_function_needs_enter ())
1110 	  ))
1111     return 1;
1112   if (df_regs_ever_live_p (regno)
1113       && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1114     return 1;
1115   return 0;
1116 }
1117 
/* This function contains all the intelligence about saving and
   restoring registers.  It always figures out the register save set.
   When called with PP_justcount, it merely returns the size of the
   save set (for eliminating the frame pointer, for example).  When
   called with PP_pushm or PP_popm, it emits the appropriate
   instructions for saving (pushm) or restoring (popm) the
   registers.  */
static int
m32c_pushm_popm (Push_Pop_Type ppt)
{
  int reg_mask = 0;
  int byte_count = 0, bytes;
  int i;
  rtx dwarf_set[PUSHM_N];
  int n_dwarfs = 0;
  int nosave_mask = 0;

  /* When the return value comes back in registers (a PARALLEL return
     rtx), keep those registers out of the save set so the epilogue's
     popm doesn't clobber the value.  eh_return and interrupt handlers
     must restore everything, so they are exempt.  */
  if (crtl->return_rtx
      && GET_CODE (crtl->return_rtx) == PARALLEL
      && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
    {
      rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
      rtx rv = XEXP (exp, 0);
      int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));

      /* NOTE(review): these mask constants (0x20 = $r2, 0xf0 =
	 $r0-$r3, 0x50 = $r2/$r3) don't obviously line up with the
	 mode comments; this matches upstream GCC and is left as-is
	 -- verify against pushm_info if touched.  */
      if (rv_bytes > 2)
	nosave_mask |= 0x20;	/* PSI, SI */
      else
	nosave_mask |= 0xf0;	/* DF */
      if (rv_bytes > 4)
	nosave_mask |= 0x50;	/* DI */
    }

  /* Build the pushm/popm opcode mask and count the bytes pushed.  */
  for (i = 0; i < (int) PUSHM_N; i++)
    {
      /* Skip if neither register needs saving.  */
      if (!need_to_save (pushm_info[i].reg1))
	continue;

      if (pushm_info[i].bit & nosave_mask)
	continue;

      reg_mask |= pushm_info[i].bit;
      bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;

      if (ppt == PP_pushm)
	{
	  machine_mode mode = (bytes == 2) ? HImode : SImode;
	  rtx addr;

	  /* Always use stack_pointer_rtx instead of calling
	     rtx_gen_REG ourselves.  Code elsewhere in GCC assumes
	     that there is a single rtx representing the stack pointer,
	     namely stack_pointer_rtx, and uses == to recognize it.  */
	  addr = stack_pointer_rtx;

	  if (byte_count != 0)
	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));

	  /* Record one SET per saved register for the DWARF note
	     attached to the pushm below.  */
	  dwarf_set[n_dwarfs++] =
	    gen_rtx_SET (gen_rtx_MEM (mode, addr),
			 gen_rtx_REG (mode, pushm_info[i].reg1));
	  F (dwarf_set[n_dwarfs - 1]);

	}
      byte_count += bytes;
    }

  /* For interrupt handlers, stash the mask in intr_pushm (consumed
     elsewhere) and restart the accounting for the mem registers
     handled below.  */
  if (cfun->machine->is_interrupt)
    {
      cfun->machine->intr_pushm = reg_mask & 0xfe;
      reg_mask = 0;
      byte_count = 0;
    }

  /* Interrupt handlers additionally save any live mem0..mem7
     pseudo-registers, two bytes apiece.  */
  if (cfun->machine->is_interrupt)
    for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
      if (need_to_save (i))
	{
	  byte_count += 2;
	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
	}

  if (ppt == PP_pushm && byte_count)
    {
      /* Emit the pushm, with a REG_FRAME_RELATED_EXPR note that
	 describes the stack adjustment plus each register store.  */
      rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
      rtx pushm;

      if (reg_mask)
	{
	  XVECEXP (note, 0, 0)
	    = gen_rtx_SET (stack_pointer_rtx,
			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
					 stack_pointer_rtx,
					 GEN_INT (-byte_count)));
	  F (XVECEXP (note, 0, 0));

	  for (i = 0; i < n_dwarfs; i++)
	    XVECEXP (note, 0, i + 1) = dwarf_set[i];

	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));

	  add_reg_note (pushm, REG_FRAME_RELATED_EXPR, note);
	}

      /* The mem pseudo-registers are pushed one at a time.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
	      else
		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
	      F (pushm);
	    }
    }
  if (ppt == PP_popm && byte_count)
    {
      /* Restore in the reverse order of the pushes above.  */
      if (cfun->machine->is_interrupt)
	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
	    {
	      if (TARGET_A16)
		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
	      else
		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
	    }
      if (reg_mask)
	emit_insn (gen_popm (GEN_INT (reg_mask)));
    }

  return byte_count;
}
1251 
1252 /* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
1253    diagrams our call frame.  */
1254 int
1255 m32c_initial_elimination_offset (int from, int to)
1256 {
1257   int ofs = 0;
1258 
1259   if (from == AP_REGNO)
1260     {
1261       if (TARGET_A16)
1262 	ofs += 5;
1263       else
1264 	ofs += 8;
1265     }
1266 
1267   if (to == SP_REGNO)
1268     {
1269       ofs += m32c_pushm_popm (PP_justcount);
1270       ofs += get_frame_size ();
1271     }
1272 
1273   /* Account for push rounding.  */
1274   if (TARGET_A24)
1275     ofs = (ofs + 1) & ~1;
1276 #if DEBUG0
1277   fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1278 	   to, ofs);
1279 #endif
1280   return ofs;
1281 }
1282 
1283 /* Passing Function Arguments on the Stack */
1284 
1285 /* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
1286    M32C has word stacks.  */
1287 unsigned int
1288 m32c_push_rounding (int n)
1289 {
1290   if (TARGET_R8C || TARGET_M16C)
1291     return n;
1292   return (n + 1) & ~1;
1293 }
1294 
1295 /* Passing Arguments in Registers */
1296 
1297 /* Implements TARGET_FUNCTION_ARG.  Arguments are passed partly in
1298    registers, partly on stack.  If our function returns a struct, a
1299    pointer to a buffer for it is at the top of the stack (last thing
1300    pushed).  The first few real arguments may be in registers as
1301    follows:
1302 
1303    R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
1304 		arg2 in r2 if it's HI (else pushed on stack)
1305 		rest on stack
1306    M32C:        arg1 in r0 if it's QI or HI (else it's pushed on stack)
1307 		rest on stack
1308 
1309    Structs are not passed in registers, even if they fit.  Only
1310    integer and pointer types are passed in registers.
1311 
1312    Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1313    r2 if it fits.  */
1314 #undef TARGET_FUNCTION_ARG
1315 #define TARGET_FUNCTION_ARG m32c_function_arg
1316 static rtx
1317 m32c_function_arg (cumulative_args_t ca_v,
1318 		   machine_mode mode, const_tree type, bool named)
1319 {
1320   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1321 
1322   /* Can return a reg, parallel, or 0 for stack */
1323   rtx rv = NULL_RTX;
1324 #if DEBUG0
1325   fprintf (stderr, "func_arg %d (%s, %d)\n",
1326 	   ca->parm_num, mode_name[mode], named);
1327   debug_tree ((tree)type);
1328 #endif
1329 
1330   if (mode == VOIDmode)
1331     return GEN_INT (0);
1332 
1333   if (ca->force_mem || !named)
1334     {
1335 #if DEBUG0
1336       fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1337 	       named);
1338 #endif
1339       return NULL_RTX;
1340     }
1341 
1342   if (type && INTEGRAL_TYPE_P (type) && POINTER_TYPE_P (type))
1343     return NULL_RTX;
1344 
1345   if (type && AGGREGATE_TYPE_P (type))
1346     return NULL_RTX;
1347 
1348   switch (ca->parm_num)
1349     {
1350     case 1:
1351       if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1352 	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1353       break;
1354 
1355     case 2:
1356       if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1357 	rv = gen_rtx_REG (mode, R2_REGNO);
1358       break;
1359     }
1360 
1361 #if DEBUG0
1362   debug_rtx (rv);
1363 #endif
1364   return rv;
1365 }
1366 
1367 #undef TARGET_PASS_BY_REFERENCE
1368 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1369 static bool
1370 m32c_pass_by_reference (cumulative_args_t ca ATTRIBUTE_UNUSED,
1371 			machine_mode mode ATTRIBUTE_UNUSED,
1372 			const_tree type ATTRIBUTE_UNUSED,
1373 			bool named ATTRIBUTE_UNUSED)
1374 {
1375   return 0;
1376 }
1377 
1378 /* Implements INIT_CUMULATIVE_ARGS.  */
1379 void
1380 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1381 			   tree fntype,
1382 			   rtx libname ATTRIBUTE_UNUSED,
1383 			   tree fndecl,
1384 			   int n_named_args ATTRIBUTE_UNUSED)
1385 {
1386   if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1387     ca->force_mem = 1;
1388   else
1389     ca->force_mem = 0;
1390   ca->parm_num = 1;
1391 }
1392 
1393 /* Implements TARGET_FUNCTION_ARG_ADVANCE.  force_mem is set for
1394    functions returning structures, so we always reset that.  Otherwise,
1395    we only need to know the sequence number of the argument to know what
1396    to do with it.  */
1397 #undef TARGET_FUNCTION_ARG_ADVANCE
1398 #define TARGET_FUNCTION_ARG_ADVANCE m32c_function_arg_advance
1399 static void
1400 m32c_function_arg_advance (cumulative_args_t ca_v,
1401 			   machine_mode mode ATTRIBUTE_UNUSED,
1402 			   const_tree type ATTRIBUTE_UNUSED,
1403 			   bool named ATTRIBUTE_UNUSED)
1404 {
1405   CUMULATIVE_ARGS *ca = get_cumulative_args (ca_v);
1406 
1407   if (ca->force_mem)
1408     ca->force_mem = 0;
1409   else
1410     ca->parm_num++;
1411 }
1412 
1413 /* Implements TARGET_FUNCTION_ARG_BOUNDARY.  */
1414 #undef TARGET_FUNCTION_ARG_BOUNDARY
1415 #define TARGET_FUNCTION_ARG_BOUNDARY m32c_function_arg_boundary
1416 static unsigned int
1417 m32c_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
1418 			    const_tree type ATTRIBUTE_UNUSED)
1419 {
1420   return (TARGET_A16 ? 8 : 16);
1421 }
1422 
1423 /* Implements FUNCTION_ARG_REGNO_P.  */
1424 int
1425 m32c_function_arg_regno_p (int r)
1426 {
1427   if (TARGET_A24)
1428     return (r == R0_REGNO);
1429   return (r == R1_REGNO || r == R2_REGNO);
1430 }
1431 
1432 /* HImode and PSImode are the two "native" modes as far as GCC is
1433    concerned, but the chips also support a 32-bit mode which is used
1434    for some opcodes in R8C/M16C and for reset vectors and such.  */
1435 #undef TARGET_VALID_POINTER_MODE
1436 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1437 static bool
1438 m32c_valid_pointer_mode (machine_mode mode)
1439 {
1440   if (mode == HImode
1441       || mode == PSImode
1442       || mode == SImode
1443       )
1444     return 1;
1445   return 0;
1446 }
1447 
1448 /* How Scalar Function Values Are Returned */
1449 
1450 /* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
1451    combination of registers starting there (r2r0 for longs, r3r1r2r0
1452    for long long, r3r2r1r0 for doubles), except that that ABI
1453    currently doesn't work because it ends up using all available
1454    general registers and gcc often can't compile it.  So, instead, we
1455    return anything bigger than 16 bits in "mem0" (effectively, a
1456    memory location).  */
1457 
1458 #undef TARGET_LIBCALL_VALUE
1459 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1460 
static rtx
m32c_libcall_value (machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
{
  /* return reg or parallel */
#if 0
  /* FIXME: GCC has difficulty returning large values in registers,
     because that ties up most of the general registers and gives the
     register allocator little to work with.  Until we can resolve
     this, large values are returned in memory.  */
  if (mode == DFmode)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R0_REGNO),
					      GEN_INT (0));
      XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R1_REGNO),
					      GEN_INT (2));
      XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R2_REGNO),
					      GEN_INT (4));
      XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (HImode,
							   R3_REGNO),
					      GEN_INT (6));
      return rv;
    }

  if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
    {
      rtx rv;

      rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
      XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
					      gen_rtx_REG (mode,
							   R0_REGNO),
					      GEN_INT (0));
      return rv;
    }
#endif

  /* Live code: values wider than 16 bits come back in the mem0
     pseudo-register; 8- and 16-bit values come back in $r0.  */
  if (GET_MODE_SIZE (mode) > 2)
    return gen_rtx_REG (mode, MEM0_REGNO);
  return gen_rtx_REG (mode, R0_REGNO);
}
1511 
1512 /* Implements TARGET_FUNCTION_VALUE.  Functions and libcalls have the same
1513    conventions.  */
1514 
1515 #undef TARGET_FUNCTION_VALUE
1516 #define TARGET_FUNCTION_VALUE m32c_function_value
1517 
1518 static rtx
1519 m32c_function_value (const_tree valtype,
1520 		     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1521 		     bool outgoing ATTRIBUTE_UNUSED)
1522 {
1523   /* return reg or parallel */
1524   const machine_mode mode = TYPE_MODE (valtype);
1525   return m32c_libcall_value (mode, NULL_RTX);
1526 }
1527 
1528 /* Implements TARGET_FUNCTION_VALUE_REGNO_P.  */
1529 
1530 #undef TARGET_FUNCTION_VALUE_REGNO_P
1531 #define TARGET_FUNCTION_VALUE_REGNO_P m32c_function_value_regno_p
1532 
1533 static bool
1534 m32c_function_value_regno_p (const unsigned int regno)
1535 {
1536   return (regno == R0_REGNO || regno == MEM0_REGNO);
1537 }
1538 
1539 /* How Large Values Are Returned */
1540 
1541 /* We return structures by pushing the address on the stack, even if
1542    we use registers for the first few "real" arguments.  */
1543 #undef TARGET_STRUCT_VALUE_RTX
1544 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1545 static rtx
1546 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1547 		       int incoming ATTRIBUTE_UNUSED)
1548 {
1549   return 0;
1550 }
1551 
1552 /* Function Entry and Exit */
1553 
1554 /* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
1555 int
1556 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1557 {
1558   if (cfun->machine->is_interrupt)
1559     return 1;
1560   return 0;
1561 }
1562 
1563 /* Implementing the Varargs Macros */
1564 
1565 #undef TARGET_STRICT_ARGUMENT_NAMING
1566 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1567 static bool
1568 m32c_strict_argument_naming (cumulative_args_t ca ATTRIBUTE_UNUSED)
1569 {
1570   return 1;
1571 }
1572 
1573 /* Trampolines for Nested Functions */
1574 
1575 /*
1576    m16c:
1577    1 0000 75C43412              mov.w   #0x1234,a0
1578    2 0004 FC000000              jmp.a   label
1579 
1580    m32c:
1581    1 0000 BC563412              mov.l:s #0x123456,a0
1582    2 0004 CC000000              jmp.a   label
1583 */
1584 
1585 /* Implements TRAMPOLINE_SIZE.  */
1586 int
1587 m32c_trampoline_size (void)
1588 {
1589   /* Allocate extra space so we can avoid the messy shifts when we
1590      initialize the trampoline; we just write past the end of the
1591      opcode.  */
1592   return TARGET_A16 ? 8 : 10;
1593 }
1594 
/* Implements TRAMPOLINE_ALIGNMENT.  Trampolines are word-aligned.  */
int
m32c_trampoline_alignment (void)
{
  return 2;
}
1601 
1602 /* Implements TARGET_TRAMPOLINE_INIT.  */
1603 
1604 #undef TARGET_TRAMPOLINE_INIT
1605 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
static void
m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
{
  /* Address of the nested function the trampoline jumps to.  */
  rtx function = XEXP (DECL_RTL (fndecl), 0);

/* Shorthand: trampoline block M_TRAMP at byte offset I, in mode M.  */
#define A0(m,i) adjust_address (m_tramp, m, i)
  if (TARGET_A16)
    {
      /* "mov.w #chainval,a0" (75 C4, stored little-endian as 0xc475)
	 followed by "jmp.a function" (FC) -- see the opcode dump in
	 the comment above.  */
      /* Note: we subtract a "word" because the moves want signed
	 constants, not unsigned constants.  */
      emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
      emit_move_insn (A0 (HImode, 2), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
      /* We use 16-bit addresses here, but store the zero to turn it
	 into a 24-bit offset.  */
      emit_move_insn (A0 (HImode, 5), function);
      emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
    }
  else
    {
      /* "mov.l:s #chainval,a0" (BC) then "jmp.a function" (CC).  */
      /* Note that the PSI moves actually write 4 bytes.  Make sure we
	 write stuff out in the right order, and leave room for the
	 extra byte at the end.  */
      emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
      emit_move_insn (A0 (PSImode, 1), chainval);
      emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
      emit_move_insn (A0 (PSImode, 5), function);
    }
#undef A0
}
1636 
1637 #undef TARGET_LRA_P
1638 #define TARGET_LRA_P hook_bool_void_false
1639 
1640 /* Addressing Modes */
1641 
1642 /* The r8c/m32c family supports a wide range of non-orthogonal
1643    addressing modes, including the ability to double-indirect on *some*
1644    of them.  Not all insns support all modes, either, but we rely on
1645    predicates and constraints to deal with that.  */
1646 #undef TARGET_LEGITIMATE_ADDRESS_P
1647 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
bool
m32c_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  int mode_adjust;
  if (CONSTANT_P (x))
    return 1;

  /* Addresses must be in a native pointer mode for the chip.  */
  if (TARGET_A16 && GET_MODE (x) != HImode && GET_MODE (x) != SImode)
    return 0;
  if (TARGET_A24 && GET_MODE (x) != PSImode)
    return 0;

  /* Wide references to memory will be split after reload, so we must
     ensure that all parts of such splits remain legitimate
     addresses.  */
  mode_adjust = GET_MODE_SIZE (mode) - 1;

  /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
  if (GET_CODE (x) == PRE_DEC
      || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
    {
      /* Auto-inc/dec addressing is only allowed on the stack pointer.  */
      return (GET_CODE (XEXP (x, 0)) == REG
	      && REGNO (XEXP (x, 0)) == SP_REGNO);
    }

#if 0
  /* This is the double indirection detection, but it currently
     doesn't work as cleanly as this code implies, so until we've had
     a chance to debug it, leave it disabled.  */
  if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
    {
#if DEBUG_DOUBLE
      fprintf (stderr, "double indirect\n");
#endif
      x = XEXP (x, 0);
    }
#endif

  /* Classify the address shape; pattern strings and patternr[] are
     produced by encode_pattern, defined earlier in this file.  */
  encode_pattern (x);
  if (RTX_IS ("r"))
    {
      /* Most indexable registers can be used without displacements,
	 although some of them will be emitted with an explicit zero
	 to please the assembler.  */
      switch (REGNO (patternr[0]))
	{
	case A1_REGNO:
	case SB_REGNO:
	case FB_REGNO:
	case SP_REGNO:
	  if (TARGET_A16 && GET_MODE (x) == SImode)
	    return 0;
	  /* FALLTHRU */
	case A0_REGNO:
	  return 1;

	default:
	  if (IS_PSEUDO (patternr[0], strict))
	    return 1;
	  return 0;
	}
    }

  if (TARGET_A16 && GET_MODE (x) == SImode)
    return 0;

  /* Base register plus constant displacement.  */
  if (RTX_IS ("+ri"))
    {
      /* This is more interesting, because different base registers
	 allow for different displacements - both range and signedness
	 - and it differs from chip series to chip series too.  */
      int rn = REGNO (patternr[1]);
      HOST_WIDE_INT offs = INTVAL (patternr[2]);
      switch (rn)
	{
	case A0_REGNO:
	case A1_REGNO:
	case SB_REGNO:
	  /* The syntax only allows positive offsets, but when the
	     offsets span the entire memory range, we can simulate
	     negative offsets by wrapping.  */
	  if (TARGET_A16)
	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
	  if (rn == SB_REGNO)
	    return (offs >= 0 && offs <= 65535 - mode_adjust);
	  /* A0 or A1 */
	  return (offs >= -16777216 && offs <= 16777215);

	case FB_REGNO:
	  /* $fb offsets are signed 8-bit on A16, 16-bit on A24.  */
	  if (TARGET_A16)
	    return (offs >= -128 && offs <= 127 - mode_adjust);
	  return (offs >= -65536 && offs <= 65535 - mode_adjust);

	case SP_REGNO:
	  return (offs >= -128 && offs <= 127 - mode_adjust);

	default:
	  if (IS_PSEUDO (patternr[1], strict))
	    return 1;
	  return 0;
	}
    }
  /* Base register plus a symbolic (and optionally constant) part.  */
  if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
    {
      rtx reg = patternr[1];

      /* We don't know where the symbol is, so only allow base
	 registers which support displacements spanning the whole
	 address range.  */
      switch (REGNO (reg))
	{
	case A0_REGNO:
	case A1_REGNO:
	  /* $sb needs a secondary reload, but since it's involved in
	     memory address reloads too, we don't deal with it very
	     well.  */
	  /*    case SB_REGNO: */
	  return 1;
	default:
	  if (GET_CODE (reg) == SUBREG)
	    return 0;
	  if (IS_PSEUDO (reg, strict))
	    return 1;
	  return 0;
	}
    }
  return 0;
}
1776 
1777 /* Implements REG_OK_FOR_BASE_P.  */
1778 int
1779 m32c_reg_ok_for_base_p (rtx x, int strict)
1780 {
1781   if (GET_CODE (x) != REG)
1782     return 0;
1783   switch (REGNO (x))
1784     {
1785     case A0_REGNO:
1786     case A1_REGNO:
1787     case SB_REGNO:
1788     case FB_REGNO:
1789     case SP_REGNO:
1790       return 1;
1791     default:
1792       if (IS_PSEUDO (x, strict))
1793 	return 1;
1794       return 0;
1795     }
1796 }
1797 
1798 /* We have three choices for choosing fb->aN offsets.  If we choose -128,
1799    we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1800    like this:
1801        EB 4B FF    mova    -128[$fb],$a0
1802        D8 0C FF FF mov.w:Q #0,-1[$a0]
1803 
1804    Alternately, we subtract the frame size, and hopefully use 8-bit aN
1805    displacements:
1806        7B F4       stc $fb,$a0
1807        77 54 00 01 sub #256,$a0
1808        D8 08 01    mov.w:Q #0,1[$a0]
1809 
1810    If we don't offset (i.e. offset by zero), we end up with:
1811        7B F4       stc $fb,$a0
1812        D8 0C 00 FF mov.w:Q #0,-256[$a0]
1813 
1814    We have to subtract *something* so that we have a PLUS rtx to mark
1815    that we've done this reload.  The -128 offset will never result in
1816    an 8-bit aN offset, and the payoff for the second case is five
1817    loads *if* those loads are within 256 bytes of the other end of the
1818    frame, so the third case seems best.  Note that we subtract the
1819    zero, but detect that in the addhi3 pattern.  */
1820 
1821 #define BIG_FB_ADJ 0
1822 
1823 /* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
1824    worry about is frame base offsets, as $fb has a limited
1825    displacement range.  We deal with this by attempting to reload $fb
1826    itself into an address register; that seems to result in the best
1827    code.  */
1828 #undef TARGET_LEGITIMIZE_ADDRESS
1829 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1830 static rtx
1831 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1832 			 machine_mode mode)
1833 {
1834 #if DEBUG0
1835   fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1836   debug_rtx (x);
1837   fprintf (stderr, "\n");
1838 #endif
1839 
1840   if (GET_CODE (x) == PLUS
1841       && GET_CODE (XEXP (x, 0)) == REG
1842       && REGNO (XEXP (x, 0)) == FB_REGNO
1843       && GET_CODE (XEXP (x, 1)) == CONST_INT
1844       && (INTVAL (XEXP (x, 1)) < -128
1845 	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1846     {
1847       /* reload FB to A_REGS */
1848       rtx temp = gen_reg_rtx (Pmode);
1849       x = copy_rtx (x);
1850       emit_insn (gen_rtx_SET (temp, XEXP (x, 0)));
1851       XEXP (x, 0) = temp;
1852     }
1853 
1854   return x;
1855 }
1856 
/* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  Returns
   nonzero if a reload was pushed (GCC then skips its own handling).  */
int
m32c_legitimize_reload_address (rtx * x,
				machine_mode mode,
				int opnum,
				int type, int ind_levels ATTRIBUTE_UNUSED)
{
#if DEBUG0
  fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
	   mode_name[mode]);
  debug_rtx (*x);
#endif

  /* At one point, this function tried to get $fb copied to an address
     register, which in theory would maximize sharing, but gcc was
     *also* still trying to reload the whole address, and we'd run out
     of address registers.  So we let gcc do the naive (but safe)
     reload instead, when the above function doesn't handle it for
     us.

     The code below is a second attempt at the above.  */

  /* Case 1: (plus $fb const) with a displacement outside the signed
     8-bit fb range.  Rewrite as ((fb + adj) + rest) and reload the
     inner sum into an address register (BIG_FB_ADJ is currently 0;
     the zero subtraction is detected in the addhi3 pattern).  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == REG
      && REGNO (XEXP (*x, 0)) == FB_REGNO
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      && (INTVAL (XEXP (*x, 1)) < -128
	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
    {
      rtx sum;
      int offset = INTVAL (XEXP (*x, 1));
      int adjustment = -BIG_FB_ADJ;

      sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
			  GEN_INT (adjustment));
      *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 2: an address already in the ((fb + const) + const) shape
     produced above; reload the inner part into an address register.  */
  if (GET_CODE (*x) == PLUS
      && GET_CODE (XEXP (*x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
      && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
      && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (*x, 1)) == CONST_INT
      )
    {
      if (type == RELOAD_OTHER)
	type = RELOAD_FOR_OTHER_ADDRESS;
      push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  /* Case 3: on M32C, reload a whole PSImode address into an address
     register.  */
  if (TARGET_A24 && GET_MODE (*x) == PSImode)
    {
      push_reload (*x, NULL_RTX, x, NULL,
		   A_REGS, PSImode, VOIDmode, 0, 0, opnum,
		   (enum reload_type) type);
      return 1;
    }

  return 0;
}
1927 
1928 /* Return the appropriate mode for a named address pointer.  */
1929 #undef TARGET_ADDR_SPACE_POINTER_MODE
1930 #define TARGET_ADDR_SPACE_POINTER_MODE m32c_addr_space_pointer_mode
1931 static machine_mode
1932 m32c_addr_space_pointer_mode (addr_space_t addrspace)
1933 {
1934   switch (addrspace)
1935     {
1936     case ADDR_SPACE_GENERIC:
1937       return TARGET_A24 ? PSImode : HImode;
1938     case ADDR_SPACE_FAR:
1939       return SImode;
1940     default:
1941       gcc_unreachable ();
1942     }
1943 }
1944 
1945 /* Return the appropriate mode for a named address address.  */
1946 #undef TARGET_ADDR_SPACE_ADDRESS_MODE
1947 #define TARGET_ADDR_SPACE_ADDRESS_MODE m32c_addr_space_address_mode
1948 static machine_mode
1949 m32c_addr_space_address_mode (addr_space_t addrspace)
1950 {
1951   switch (addrspace)
1952     {
1953     case ADDR_SPACE_GENERIC:
1954       return TARGET_A24 ? PSImode : HImode;
1955     case ADDR_SPACE_FAR:
1956       return SImode;
1957     default:
1958       gcc_unreachable ();
1959     }
1960 }
1961 
1962 /* Like m32c_legitimate_address_p, except with named addresses.  */
1963 #undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
1964 #define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
1965   m32c_addr_space_legitimate_address_p
static bool
m32c_addr_space_legitimate_address_p (machine_mode mode, rtx x,
				      bool strict, addr_space_t as)
{
  if (as == ADDR_SPACE_FAR)
    {
      /* Far addresses are only recognized here on A16 chips; A24
	 rejects them outright.  */
      if (TARGET_A24)
	return 0;
      /* Pattern strings and patternr[] come from encode_pattern,
	 defined earlier in this file.  */
      encode_pattern (x);
      /* A bare SImode register: $a0 (or a pseudo before reload).  */
      if (RTX_IS ("r"))
	{
	  if (GET_MODE (x) != SImode)
	    return 0;
	  switch (REGNO (patternr[0]))
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[0], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus constant: the offset must fit in 20 bits.  */
      if (RTX_IS ("+^Sri"))
	{
	  int rn = REGNO (patternr[3]);
	  HOST_WIDE_INT offs = INTVAL (patternr[4]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return (offs >= 0 && offs <= 0xfffff);

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus symbol.  */
      if (RTX_IS ("+^Srs"))
	{
	  int rn = REGNO (patternr[3]);
	  if (GET_MODE (patternr[3]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[3], strict))
		return 1;
	      return 0;
	    }
	}
      /* Register plus constant plus symbol.  */
      if (RTX_IS ("+^S+ris"))
	{
	  int rn = REGNO (patternr[4]);
	  if (GET_MODE (patternr[4]) != HImode)
	    return 0;
	  switch (rn)
	    {
	    case A0_REGNO:
	      return 1;

	    default:
	      if (IS_PSEUDO (patternr[4], strict))
		return 1;
	      return 0;
	    }
	}
      /* A bare symbol reference is always acceptable.  */
      if (RTX_IS ("s"))
	{
	  return 1;
	}
      return 0;
    }

  else if (as != ADDR_SPACE_GENERIC)
    gcc_unreachable ();

  /* Generic space: defer to the ordinary legitimacy rules.  */
  return m32c_legitimate_address_p (mode, x, strict);
}
2051 
2052 /* Like m32c_legitimate_address, except with named address support.  */
2053 #undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
2054 #define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS m32c_addr_space_legitimize_address
2055 static rtx
2056 m32c_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
2057 				    addr_space_t as)
2058 {
2059   if (as != ADDR_SPACE_GENERIC)
2060     {
2061 #if DEBUG0
2062       fprintf (stderr, "\033[36mm32c_addr_space_legitimize_address for mode %s\033[0m\n", mode_name[mode]);
2063       debug_rtx (x);
2064       fprintf (stderr, "\n");
2065 #endif
2066 
2067       if (GET_CODE (x) != REG)
2068 	{
2069 	  x = force_reg (SImode, x);
2070 	}
2071       return x;
2072     }
2073 
2074   return m32c_legitimize_address (x, oldx, mode);
2075 }
2076 
2077 /* Determine if one named address space is a subset of another.  */
2078 #undef TARGET_ADDR_SPACE_SUBSET_P
2079 #define TARGET_ADDR_SPACE_SUBSET_P m32c_addr_space_subset_p
2080 static bool
2081 m32c_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
2082 {
2083   gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_FAR);
2084   gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_FAR);
2085 
2086   if (subset == superset)
2087     return true;
2088 
2089   else
2090     return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_FAR);
2091 }
2092 
#undef TARGET_ADDR_SPACE_CONVERT
#define TARGET_ADDR_SPACE_CONVERT m32c_addr_space_convert
/* Convert from one address space to another.  Returns a fresh
   register holding OP reinterpreted in the destination space.  */
static rtx
m32c_addr_space_convert (rtx op, tree from_type, tree to_type)
{
  addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
  addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
  rtx result;

  gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_FAR);
  gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_FAR);

  if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_FAR)
    {
      /* This is unpredictable, as we're truncating off usable address
	 bits.  */

      /* Far (SImode) to near (HImode): keep only the low half.  */
      result = gen_reg_rtx (HImode);
      emit_move_insn (result, simplify_subreg (HImode, op, SImode, 0));
      return result;
    }
  else if (to_as == ADDR_SPACE_FAR && from_as == ADDR_SPACE_GENERIC)
    {
      /* This always works.  */
      /* Near to far: zero-extend the 16-bit address to 32 bits.  */
      result = gen_reg_rtx (SImode);
      emit_insn (gen_zero_extendhisi2 (result, op));
      return result;
    }
  else
    gcc_unreachable ();
}
2125 
2126 /* Condition Code Status */
2127 
2128 #undef TARGET_FIXED_CONDITION_CODE_REGS
2129 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2130 static bool
2131 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2132 {
2133   *p1 = FLG_REGNO;
2134   *p2 = INVALID_REGNUM;
2135   return true;
2136 }
2137 
/* Describing Relative Costs of Operations */

/* Implements TARGET_REGISTER_MOVE_COST.  We make impossible moves
   prohibitively expensive, like trying to put QIs in r2/r3 (there are
   no opcodes to do that).  We also discourage use of mem* registers
   since they're really memory.  */

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST m32c_register_move_cost

static int
m32c_register_move_cost (machine_mode mode, reg_class_t from,
			 reg_class_t to)
{
  int cost = COSTS_N_INSNS (3);
  HARD_REG_SET cc;

/* FIXME: pick real values, but not 2 for now.  */
  /* CC is the union of the source and destination classes.  */
  COPY_HARD_REG_SET (cc, reg_class_contents[(int) from]);
  IOR_HARD_REG_SET (cc, reg_class_contents[(int) to]);

  /* QImode moves touching r2/r3: prohibitive when both ends are
     confined to r2/r3, merely expensive when only one end is.  */
  if (mode == QImode
      && hard_reg_set_intersect_p (cc, reg_class_contents[R23_REGS]))
    {
      if (hard_reg_set_subset_p (cc, reg_class_contents[R23_REGS]))
	cost = COSTS_N_INSNS (1000);
      else
	cost = COSTS_N_INSNS (80);
    }

  /* A class that cannot hold the mode at all makes the move
     effectively impossible.  */
  if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
    cost = COSTS_N_INSNS (1000);

  /* Moves through control registers carry a surcharge.  */
  if (reg_classes_intersect_p (from, CR_REGS))
    cost += COSTS_N_INSNS (5);

  if (reg_classes_intersect_p (to, CR_REGS))
    cost += COSTS_N_INSNS (5);

  /* mem* pseudo-registers are really memory; discourage them.  */
  if (from == MEM_REGS || to == MEM_REGS)
    cost += COSTS_N_INSNS (50);
  else if (reg_classes_intersect_p (from, MEM_REGS)
	   || reg_classes_intersect_p (to, MEM_REGS))
    cost += COSTS_N_INSNS (10);

#if DEBUG0
  fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
	   mode_name[mode], class_names[(int) from], class_names[(int) to],
	   cost);
#endif
  return cost;
}
2190 
2191 /*  Implements TARGET_MEMORY_MOVE_COST.  */
2192 
2193 #undef TARGET_MEMORY_MOVE_COST
2194 #define TARGET_MEMORY_MOVE_COST m32c_memory_move_cost
2195 
2196 static int
2197 m32c_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
2198 		       reg_class_t rclass ATTRIBUTE_UNUSED,
2199 		       bool in ATTRIBUTE_UNUSED)
2200 {
2201   /* FIXME: pick real values.  */
2202   return COSTS_N_INSNS (10);
2203 }
2204 
/* Here we try to describe when we use multiple opcodes for one RTX so
   that gcc knows when to use them.  */
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m32c_rtx_costs
static bool
m32c_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);
  switch (code)
    {
    case REG:
      /* mem0..mem7 are memory pretending to be registers; make them
	 very expensive to use.  */
      if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
	*total += COSTS_N_INSNS (500);
      else
	*total += COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
      /* Variable shift counts need the count moved into r1h first.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	{
	  /* mov.b r1l, r1h */
	  *total +=  COSTS_N_INSNS (1);
	  return true;
	}
      /* Counts beyond +/-8 need the immediate loaded and moved.  */
      if (INTVAL (XEXP (x, 1)) > 8
	  || INTVAL (XEXP (x, 1)) < -8)
	{
	  /* mov.b #N, r1l */
	  /* mov.b r1l, r1h */
	  *total +=  COSTS_N_INSNS (2);
	  return true;
	}
      return true;

    case LE:
    case LEU:
    case LT:
    case LTU:
    case GT:
    case GTU:
    case GE:
    case GEU:
    case NE:
    case EQ:
      /* A comparison whose result is stored (rather than branched on)
	 costs a couple of extra insns.  */
      if (outer_code == SET)
	{
	  *total += COSTS_N_INSNS (2);
	  return true;
	}
      break;

    case ZERO_EXTRACT:
      {
	rtx dest = XEXP (x, 0);
	rtx addr = XEXP (dest, 0);
	/* NOTE(review): this assumes DEST is a MEM whose address we
	   can inspect — confirm no other ZERO_EXTRACT forms reach
	   here.  Cost depends on the addressing mode used.  */
	switch (GET_CODE (addr))
	  {
	  case CONST_INT:
	    *total += COSTS_N_INSNS (1);
	    break;
	  case SYMBOL_REF:
	    *total += COSTS_N_INSNS (3);
	    break;
	  default:
	    *total += COSTS_N_INSNS (2);
	    break;
	  }
	return true;
      }
      break;

    default:
      /* Reasonable default.  */
      if (TARGET_A16 && mode == SImode)
	*total += COSTS_N_INSNS (2);
      break;
    }
  return false;
}
2288 
2289 #undef TARGET_ADDRESS_COST
2290 #define TARGET_ADDRESS_COST m32c_address_cost
2291 static int
2292 m32c_address_cost (rtx addr, machine_mode mode ATTRIBUTE_UNUSED,
2293 		   addr_space_t as ATTRIBUTE_UNUSED,
2294 		   bool speed ATTRIBUTE_UNUSED)
2295 {
2296   int i;
2297   /*  fprintf(stderr, "\naddress_cost\n");
2298       debug_rtx(addr);*/
2299   switch (GET_CODE (addr))
2300     {
2301     case CONST_INT:
2302       i = INTVAL (addr);
2303       if (i == 0)
2304 	return COSTS_N_INSNS(1);
2305       if (0 < i && i <= 255)
2306 	return COSTS_N_INSNS(2);
2307       if (0 < i && i <= 65535)
2308 	return COSTS_N_INSNS(3);
2309       return COSTS_N_INSNS(4);
2310     case SYMBOL_REF:
2311       return COSTS_N_INSNS(4);
2312     case REG:
2313       return COSTS_N_INSNS(1);
2314     case PLUS:
2315       if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2316 	{
2317 	  i = INTVAL (XEXP (addr, 1));
2318 	  if (i == 0)
2319 	    return COSTS_N_INSNS(1);
2320 	  if (0 < i && i <= 255)
2321 	    return COSTS_N_INSNS(2);
2322 	  if (0 < i && i <= 65535)
2323 	    return COSTS_N_INSNS(3);
2324 	}
2325       return COSTS_N_INSNS(4);
2326     default:
2327       return 0;
2328     }
2329 }
2330 
2331 /* Defining the Output Assembler Language */
2332 
2333 /* Output of Data */
2334 
2335 /* We may have 24 bit sizes, which is the native address size.
2336    Currently unused, but provided for completeness.  */
2337 #undef TARGET_ASM_INTEGER
2338 #define TARGET_ASM_INTEGER m32c_asm_integer
2339 static bool
2340 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2341 {
2342   switch (size)
2343     {
2344     case 3:
2345       fprintf (asm_out_file, "\t.3byte\t");
2346       output_addr_const (asm_out_file, x);
2347       fputc ('\n', asm_out_file);
2348       return true;
2349     case 4:
2350       if (GET_CODE (x) == SYMBOL_REF)
2351 	{
2352 	  fprintf (asm_out_file, "\t.long\t");
2353 	  output_addr_const (asm_out_file, x);
2354 	  fputc ('\n', asm_out_file);
2355 	  return true;
2356 	}
2357       break;
2358     }
2359   return default_assemble_integer (x, size, aligned_p);
2360 }
2361 
/* Output of Assembler Instructions */

/* We use a lookup table because the addressing modes are non-orthogonal.  */

/* Each entry maps a (print_operand code letter, encoded pattern) pair
   to an output template.  In the template (see m32c_print_operand):
   a digit N emits the rtx saved in patternr[N]; 'z' inserts a zero
   displacement when the base register requires one; '+' may force an
   explicit sign on a following constant; '\\' quotes the next
   character; anything else is emitted literally.  */
static struct
{
  char code;		/* print_operand code letter, 0 for none.  */
  char const *pattern;	/* pattern string built by encode_pattern.  */
  char const *format;	/* output template.  */
}
const conversions[] = {
  { 0, "r", "0" },

  { 0, "mr", "z[1]" },
  { 0, "m+ri", "3[2]" },
  { 0, "m+rs", "3[2]" },
  { 0, "m+^Zrs", "5[4]" },
  { 0, "m+^Zri", "5[4]" },
  { 0, "m+^Z+ris", "7+6[5]" },
  { 0, "m+^Srs", "5[4]" },
  { 0, "m+^Sri", "5[4]" },
  { 0, "m+^S+ris", "7+6[5]" },
  { 0, "m+r+si", "4+5[2]" },
  { 0, "ms", "1" },
  { 0, "mi", "1" },
  { 0, "m+si", "2+3" },

  { 0, "mmr", "[z[2]]" },
  { 0, "mm+ri", "[4[3]]" },
  { 0, "mm+rs", "[4[3]]" },
  { 0, "mm+r+si", "[5+6[3]]" },
  { 0, "mms", "[[2]]" },
  { 0, "mmi", "[[2]]" },
  { 0, "mm+si", "[4[3]]" },

  { 0, "i", "#0" },
  { 0, "s", "#0" },
  { 0, "+si", "#1+2" },
  { 0, "l", "#0" },

  { 'l', "l", "0" },
  { 'd', "i", "0" },
  { 'd', "s", "0" },
  { 'd', "+si", "1+2" },
  { 'D', "i", "0" },
  { 'D', "s", "0" },
  { 'D', "+si", "1+2" },
  { 'x', "i", "#0" },
  { 'X', "i", "#0" },
  { 'm', "i", "#0" },
  { 'b', "i", "#0" },
  { 'B', "i", "0" },
  { 'p', "i", "0" },

  { 0, 0, 0 }
};
2418 
/* This is in order according to the bitfield that pushm/popm use.  */
/* Index N names the register for bit N of the pushm/popm mask; the
   'p' case of m32c_print_operand walks bits 7..0, so "r0" (bit 7) is
   printed first.  */
static char const *pushm_regs[] = {
  "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
};
2423 
/* Implements TARGET_PRINT_OPERAND.  */

#undef TARGET_PRINT_OPERAND
#define TARGET_PRINT_OPERAND m32c_print_operand

/* Print operand X to FILE, modified by the operand CODE letter.
   Letters are first handled directly ('u'/'U', '!', '&', 'C'/'c',
   'h'/'H', 'x'/'X'); anything remaining is matched against the
   conversions[] table using the pattern built by encode_pattern,
   and its format template is interpreted below.  */
static void
m32c_print_operand (FILE * file, rtx x, int code)
{
  int i, j, b;
  const char *comma;
  HOST_WIDE_INT ival;
  int unsigned_const = 0;
  int force_sign;

  /* Multiplies; constants are converted to sign-extended format but
   we need unsigned, so 'u' and 'U' tell us what size unsigned we
   need.  */
  if (code == 'u')
    {
      unsigned_const = 2;
      code = 0;
    }
  if (code == 'U')
    {
      unsigned_const = 1;
      code = 0;
    }
  /* This one is only for debugging; you can put it in a pattern to
     force this error.  */
  if (code == '!')
    {
      fprintf (stderr, "dj: unreviewed pattern:");
      if (current_output_insn)
	debug_rtx (current_output_insn);
      gcc_unreachable ();
    }
  /* PSImode operations are either .w or .l depending on the target.  */
  if (code == '&')
    {
      if (TARGET_A16)
	fprintf (file, "w");
      else
	fprintf (file, "l");
      return;
    }
  /* Inverted conditionals.  */
  if (code == 'C')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("gt", file);
	  break;
	case LEU:
	  fputs ("gtu", file);
	  break;
	case LT:
	  fputs ("ge", file);
	  break;
	case LTU:
	  fputs ("geu", file);
	  break;
	case GT:
	  fputs ("le", file);
	  break;
	case GTU:
	  fputs ("leu", file);
	  break;
	case GE:
	  fputs ("lt", file);
	  break;
	case GEU:
	  fputs ("ltu", file);
	  break;
	case NE:
	  fputs ("eq", file);
	  break;
	case EQ:
	  fputs ("ne", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Regular conditionals.  */
  if (code == 'c')
    {
      switch (GET_CODE (x))
	{
	case LE:
	  fputs ("le", file);
	  break;
	case LEU:
	  fputs ("leu", file);
	  break;
	case LT:
	  fputs ("lt", file);
	  break;
	case LTU:
	  fputs ("ltu", file);
	  break;
	case GT:
	  fputs ("gt", file);
	  break;
	case GTU:
	  fputs ("gtu", file);
	  break;
	case GE:
	  fputs ("ge", file);
	  break;
	case GEU:
	  fputs ("geu", file);
	  break;
	case NE:
	  fputs ("ne", file);
	  break;
	case EQ:
	  fputs ("eq", file);
	  break;
	default:
	  gcc_unreachable ();
	}
      return;
    }
  /* Used in negsi2 to do HImode ops on the two parts of an SImode
     operand.  */
  if (code == 'h' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == SImode)
    {
      x = m32c_subreg (HImode, x, SImode, 2);
      code = 0;
    }
  if (code == 'h' && GET_MODE (x) == HImode)
    {
      x = m32c_subreg (QImode, x, HImode, 0);
      code = 0;
    }
  if (code == 'H' && GET_MODE (x) == HImode)
    {
      /* We can't actually represent this as an rtx.  Do it here.  */
      if (GET_CODE (x) == REG)
	{
	  switch (REGNO (x))
	    {
	    case R0_REGNO:
	      fputs ("r0h", file);
	      return;
	    case R1_REGNO:
	      fputs ("r1h", file);
	      return;
	    default:
	      gcc_unreachable();
	    }
	}
      /* This should be a MEM.  */
      x = m32c_subreg (QImode, x, HImode, 1);
      code = 0;
    }
  /* This is for BMcond, which always wants word register names.  */
  if (code == 'h' && GET_MODE (x) == QImode)
    {
      if (GET_CODE (x) == REG)
	x = gen_rtx_REG (HImode, REGNO (x));
      code = 0;
    }
  /* 'x' and 'X' need to be ignored for non-immediates.  */
  if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
    code = 0;

  /* Fall through to the table-driven output: find the conversions[]
     row matching the remaining CODE and the encoded pattern of X,
     then interpret its format template character by character.  */
  encode_pattern (x);
  force_sign = 0;
  for (i = 0; conversions[i].pattern; i++)
    if (conversions[i].code == code
	&& streq (conversions[i].pattern, pattern))
      {
	for (j = 0; conversions[i].format[j]; j++)
	  /* backslash quotes the next character in the output pattern.  */
	  if (conversions[i].format[j] == '\\')
	    {
	      fputc (conversions[i].format[j + 1], file);
	      j++;
	    }
	  /* Digits in the output pattern indicate that the
	     corresponding RTX is to be output at that point.  */
	  else if (ISDIGIT (conversions[i].format[j]))
	    {
	      rtx r = patternr[conversions[i].format[j] - '0'];
	      switch (GET_CODE (r))
		{
		case REG:
		  fprintf (file, "%s",
			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
		  break;
		case CONST_INT:
		  switch (code)
		    {
		    case 'b':
		    case 'B':
		      {
			int v = INTVAL (r);
			int i = (int) exact_log2 (v);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
			if (i == -1)
			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
			/* Bit position.  */
			fprintf (file, "%d", i);
		      }
		      break;
		    case 'x':
		      /* Unsigned byte.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xff);
		      break;
		    case 'X':
		      /* Unsigned word.  */
		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
			       INTVAL (r) & 0xffff);
		      break;
		    case 'p':
		      /* pushm and popm encode a register set into a single byte.  */
		      comma = "";
		      for (b = 7; b >= 0; b--)
			if (INTVAL (r) & (1 << b))
			  {
			    fprintf (file, "%s%s", comma, pushm_regs[b]);
			    comma = ",";
			  }
		      break;
		    case 'm':
		      /* "Minus".  Output -X  */
		      ival = (-INTVAL (r) & 0xffff);
		      if (ival & 0x8000)
			ival = ival - 0x10000;
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    default:
		      ival = INTVAL (r);
		      if (conversions[i].format[j + 1] == '[' && ival < 0)
			{
			  /* We can simulate negative displacements by
			     taking advantage of address space
			     wrapping when the offset can span the
			     entire address range.  */
			  rtx base =
			    patternr[conversions[i].format[j + 2] - '0'];
			  if (GET_CODE (base) == REG)
			    switch (REGNO (base))
			      {
			      case A0_REGNO:
			      case A1_REGNO:
				if (TARGET_A24)
				  ival = 0x1000000 + ival;
				else
				  ival = 0x10000 + ival;
				break;
			      case SB_REGNO:
				if (TARGET_A16)
				  ival = 0x10000 + ival;
				break;
			      }
			}
		      else if (code == 'd' && ival < 0 && j == 0)
			/* The "mova" opcode is used to do addition by
			   computing displacements, but again, we need
			   displacements to be unsigned *if* they're
			   the only component of the displacement
			   (i.e. no "symbol-4" type displacement).  */
			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;

		      if (conversions[i].format[j] == '0')
			{
			  /* More conversions to unsigned.  */
			  if (unsigned_const == 2)
			    ival &= 0xffff;
			  if (unsigned_const == 1)
			    ival &= 0xff;
			}
		      if (streq (conversions[i].pattern, "mi")
			  || streq (conversions[i].pattern, "mmi"))
			{
			  /* Integers used as addresses are unsigned.  */
			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
			}
		      if (force_sign && ival >= 0)
			fputc ('+', file);
		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
		      break;
		    }
		  break;
		case CONST_DOUBLE:
		  /* We don't have const_double constants.  If it
		     happens, make it obvious.  */
		  fprintf (file, "[const_double 0x%lx]",
			   (unsigned long) CONST_DOUBLE_HIGH (r));
		  break;
		case SYMBOL_REF:
		  assemble_name (file, XSTR (r, 0));
		  break;
		case LABEL_REF:
		  output_asm_label (r);
		  break;
		default:
		  fprintf (stderr, "don't know how to print this operand:");
		  debug_rtx (r);
		  gcc_unreachable ();
		}
	    }
	  else
	    {
	      if (conversions[i].format[j] == 'z')
		{
		  /* Some addressing modes *must* have a displacement,
		     so insert a zero here if needed.  */
		  int k;
		  for (k = j + 1; conversions[i].format[k]; k++)
		    if (ISDIGIT (conversions[i].format[k]))
		      {
			rtx reg = patternr[conversions[i].format[k] - '0'];
			if (GET_CODE (reg) == REG
			    && (REGNO (reg) == SB_REGNO
				|| REGNO (reg) == FB_REGNO
				|| REGNO (reg) == SP_REGNO))
			  fputc ('0', file);
		      }
		  continue;
		}
	      /* Signed displacements off symbols need to have signs
		 blended cleanly.  */
	      if (conversions[i].format[j] == '+'
		  && (!code || code == 'D' || code == 'd')
		  && ISDIGIT (conversions[i].format[j + 1])
		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
		      == CONST_INT))
		{
		  force_sign = 1;
		  continue;
		}
	      fputc (conversions[i].format[j], file);
	    }
	break;
      }
  /* No table row matched: complain on stderr and emit a visibly
     bogus token so the failure shows up in the assembly.  */
  if (!conversions[i].pattern)
    {
      fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
	       pattern);
      debug_rtx (x);
      fprintf (file, "[%c.%s]", code ? code : '-', pattern);
    }

  return;
}
2781 
/* Implements TARGET_PRINT_OPERAND_PUNCT_VALID_P.

   See m32c_print_operand above for descriptions of what these do.  */

#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
#define TARGET_PRINT_OPERAND_PUNCT_VALID_P m32c_print_operand_punct_valid_p

static bool
m32c_print_operand_punct_valid_p (unsigned char c)
{
  /* '&' selects the .w/.l suffix; '!' is a debugging trap.  */
  return c == '&' || c == '!';
}
2797 
2798 /* Implements TARGET_PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */
2799 
2800 #undef TARGET_PRINT_OPERAND_ADDRESS
2801 #define TARGET_PRINT_OPERAND_ADDRESS m32c_print_operand_address
2802 
2803 static void
2804 m32c_print_operand_address (FILE * stream, machine_mode /*mode*/, rtx address)
2805 {
2806   if (GET_CODE (address) == MEM)
2807     address = XEXP (address, 0);
2808   else
2809     /* cf: gcc.dg/asm-4.c.  */
2810     gcc_assert (GET_CODE (address) == REG);
2811 
2812   m32c_print_operand (stream, address, 0);
2813 }
2814 
2815 /* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
2816    differently than general registers.  */
2817 void
2818 m32c_output_reg_push (FILE * s, int regno)
2819 {
2820   if (regno == FLG_REGNO)
2821     fprintf (s, "\tpushc\tflg\n");
2822   else
2823     fprintf (s, "\tpush.%c\t%s\n",
2824 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2825 }
2826 
2827 /* Likewise for ASM_OUTPUT_REG_POP.  */
2828 void
2829 m32c_output_reg_pop (FILE * s, int regno)
2830 {
2831   if (regno == FLG_REGNO)
2832     fprintf (s, "\tpopc\tflg\n");
2833   else
2834     fprintf (s, "\tpop.%c\t%s\n",
2835 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2836 }
2837 
/* Defining target-specific uses of `__attribute__' */

/* Used to simplify the logic below.  Find the attributes wherever
   they may be: on the type itself, on the decl, or on the decl's
   type.  The expansion is fully parenthesized so it is safe to use
   inside any larger expression (the previous definition left the
   conditional expression unparenthesized).  */
#define M32C_ATTRIBUTES(decl)					\
  (TYPE_P (decl) ? TYPE_ATTRIBUTES (decl)			\
   : DECL_ATTRIBUTES (decl) ? DECL_ATTRIBUTES (decl)		\
   : TYPE_ATTRIBUTES (TREE_TYPE (decl)))
2847 
2848 /* Returns TRUE if the given tree has the "interrupt" attribute.  */
2849 static int
2850 interrupt_p (tree node ATTRIBUTE_UNUSED)
2851 {
2852   tree list = M32C_ATTRIBUTES (node);
2853   while (list)
2854     {
2855       if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2856 	return 1;
2857       list = TREE_CHAIN (list);
2858     }
2859   return fast_interrupt_p (node);
2860 }
2861 
2862 /* Returns TRUE if the given tree has the "bank_switch" attribute.  */
2863 static int
2864 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2865 {
2866   tree list = M32C_ATTRIBUTES (node);
2867   while (list)
2868     {
2869       if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2870 	return 1;
2871       list = TREE_CHAIN (list);
2872     }
2873   return 0;
2874 }
2875 
2876 /* Returns TRUE if the given tree has the "fast_interrupt" attribute.  */
2877 static int
2878 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2879 {
2880   tree list = M32C_ATTRIBUTES (node);
2881   while (list)
2882     {
2883       if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2884 	return 1;
2885       list = TREE_CHAIN (list);
2886     }
2887   return 0;
2888 }
2889 
/* Attribute handler for "interrupt", "bank_switch" and
   "fast_interrupt" (see m32c_attribute_table below); accepts the
   attribute without any validation.  */
static tree
interrupt_handler (tree * node ATTRIBUTE_UNUSED,
		   tree name ATTRIBUTE_UNUSED,
		   tree args ATTRIBUTE_UNUSED,
		   int flags ATTRIBUTE_UNUSED,
		   bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  return NULL_TREE;
}
2899 
2900 /* Returns TRUE if given tree has the "function_vector" attribute. */
2901 int
2902 m32c_special_page_vector_p (tree func)
2903 {
2904   tree list;
2905 
2906   if (TREE_CODE (func) != FUNCTION_DECL)
2907     return 0;
2908 
2909   list = M32C_ATTRIBUTES (func);
2910   while (list)
2911     {
2912       if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2913         return 1;
2914       list = TREE_CHAIN (list);
2915     }
2916   return 0;
2917 }
2918 
/* Attribute handler for "function_vector": verifies the target
   supports it, that it is applied to a function, and that its
   argument is an integer constant in [18, 255].  On any failure a
   warning is issued and the attribute is discarded.  */
static tree
function_vector_handler (tree * node ATTRIBUTE_UNUSED,
                         tree name ATTRIBUTE_UNUSED,
                         tree args ATTRIBUTE_UNUSED,
                         int flags ATTRIBUTE_UNUSED,
                         bool * no_add_attrs ATTRIBUTE_UNUSED)
{
  if (TARGET_R8C)
    {
      /* The attribute is not supported for R8C target.  */
      warning (OPT_Wattributes,
                "%qE attribute is not supported for R8C target",
                name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      /* The attribute must be applied to functions only.  */
      warning (OPT_Wattributes,
                "%qE attribute applies only to functions",
                name);
      *no_add_attrs = true;
    }
  else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
    {
      /* The argument must be a constant integer.  */
      warning (OPT_Wattributes,
                "%qE attribute argument not an integer constant",
                name);
      *no_add_attrs = true;
    }
  else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
           || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
    {
      /* The argument value must be between 18 to 255.  */
      warning (OPT_Wattributes,
                "%qE attribute argument should be between 18 to 255",
                name);
      *no_add_attrs = true;
    }
  return NULL_TREE;
}
2961 
2962 /* If the function is assigned the attribute 'function_vector', it
2963    returns the function vector number, otherwise returns zero.  */
2964 int
2965 current_function_special_page_vector (rtx x)
2966 {
2967   int num;
2968 
2969   if ((GET_CODE(x) == SYMBOL_REF)
2970       && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2971     {
2972       tree list;
2973       tree t = SYMBOL_REF_DECL (x);
2974 
2975       if (TREE_CODE (t) != FUNCTION_DECL)
2976         return 0;
2977 
2978       list = M32C_ATTRIBUTES (t);
2979       while (list)
2980         {
2981           if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2982             {
2983               num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2984               return num;
2985             }
2986 
2987           list = TREE_CHAIN (list);
2988         }
2989 
2990       return 0;
2991     }
2992   else
2993     return 0;
2994 }
2995 
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
/* { name, min_args, max_args, decl_required, type_required,
     function_type_required, handler, affects_type_identity }.
   "function_vector" takes exactly one argument and must be on a
   decl; the other three take no arguments and are unvalidated.  */
static const struct attribute_spec m32c_attribute_table[] = {
  {"interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"bank_switch", 0, 0, false, false, false, interrupt_handler, false},
  {"fast_interrupt", 0, 0, false, false, false, interrupt_handler, false},
  {"function_vector", 1, 1, true,  false, false, function_vector_handler,
   false},
  {0, 0, 0, 0, 0, 0, 0, false}
};
3006 
3007 #undef TARGET_COMP_TYPE_ATTRIBUTES
3008 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
3009 static int
3010 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
3011 			   const_tree type2 ATTRIBUTE_UNUSED)
3012 {
3013   /* 0=incompatible 1=compatible 2=warning */
3014   return 1;
3015 }
3016 
3017 #undef TARGET_INSERT_ATTRIBUTES
3018 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
3019 static void
3020 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
3021 			tree * attr_ptr ATTRIBUTE_UNUSED)
3022 {
3023   unsigned addr;
3024   /* See if we need to make #pragma address variables volatile.  */
3025 
3026   if (TREE_CODE (node) == VAR_DECL)
3027     {
3028       const char *name = IDENTIFIER_POINTER (DECL_NAME (node));
3029       if (m32c_get_pragma_address  (name, &addr))
3030 	{
3031 	  TREE_THIS_VOLATILE (node) = true;
3032 	}
3033     }
3034 }
3035 
/* Hash table of pragma info.  Maps a variable name to the address
   recorded for it by m32c_note_pragma_address; GTY so it is rooted
   for the garbage collector.  */
static GTY(()) hash_map<nofree_string_hash, unsigned> *pragma_htab;
3038 
/* Record that VARNAME was given ADDRESS via a pragma; later lookups
   go through m32c_get_pragma_address.  */
void
m32c_note_pragma_address (const char *varname, unsigned address)
{
  if (!pragma_htab)
    pragma_htab = hash_map<nofree_string_hash, unsigned>::create_ggc (31);

  /* Copy the name into GC-managed storage so the key outlives the
     caller's buffer.  */
  const char *name = ggc_strdup (varname);
  unsigned int *slot = &pragma_htab->get_or_insert (name);
  *slot = address;
}
3049 
3050 static bool
3051 m32c_get_pragma_address (const char *varname, unsigned *address)
3052 {
3053   if (!pragma_htab)
3054     return false;
3055 
3056   unsigned int *slot = pragma_htab->get (varname);
3057   if (slot)
3058     {
3059       *address = *slot;
3060       return true;
3061     }
3062   return false;
3063 }
3064 
/* Emit a common symbol NAME of SIZE bytes with alignment ALIGN (in
   bits); GLOBAL selects .comm-only vs .local+.comm.  A variable
   placed with #pragma address is instead emitted as an absolute
   symbol assignment.  */
void
m32c_output_aligned_common (FILE *stream, tree decl ATTRIBUTE_UNUSED,
			    const char *name,
			    int size, int align, int global)
{
  unsigned address;

  if (m32c_get_pragma_address (name, &address))
    {
      /* We never output these as global.  */
      assemble_name (stream, name);
      fprintf (stream, " = 0x%04x\n", address);
      return;
    }
  if (!global)
    {
      fprintf (stream, "\t.local\t");
      assemble_name (stream, name);
      fprintf (stream, "\n");
    }
  /* NOTE(review): SIZE and ALIGN are ints printed with %u — fine for
     the non-negative values expected here, but worth confirming.  */
  fprintf (stream, "\t.comm\t");
  assemble_name (stream, name);
  fprintf (stream, ",%u,%u\n", size, align / BITS_PER_UNIT);
}
3089 
/* Predicates */

/* This is a list of legal subregs of hard regs.  */
static const struct {
  unsigned char outer_mode_size;	/* size of the subreg, in bytes */
  unsigned char inner_mode_size;	/* size of the full register, in bytes */
  unsigned char byte_mask;		/* bitmask of allowed SUBREG_BYTE offsets
					   (bit N set = offset N allowed) */
  unsigned char legal_when;		/* 1 = always, 16 = only TARGET_A16,
					   24 = only TARGET_A24 */
  unsigned int regno;
} legal_subregs[] = {
  {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
  {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
  {1, 2, 0x01, 1, A0_REGNO},
  {1, 2, 0x01, 1, A1_REGNO},

  {1, 4, 0x01, 1, A0_REGNO},
  {1, 4, 0x01, 1, A1_REGNO},

  {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
  {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
  {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
  {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */

  {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
};
3116 
3117 /* Returns TRUE if OP is a subreg of a hard reg which we don't
3118    support.  We also bail on MEMs with illegal addresses.  */
3119 bool
3120 m32c_illegal_subreg_p (rtx op)
3121 {
3122   int offset;
3123   unsigned int i;
3124   machine_mode src_mode, dest_mode;
3125 
3126   if (GET_CODE (op) == MEM
3127       && ! m32c_legitimate_address_p (Pmode, XEXP (op, 0), false))
3128     {
3129       return true;
3130     }
3131 
3132   if (GET_CODE (op) != SUBREG)
3133     return false;
3134 
3135   dest_mode = GET_MODE (op);
3136   offset = SUBREG_BYTE (op);
3137   op = SUBREG_REG (op);
3138   src_mode = GET_MODE (op);
3139 
3140   if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
3141     return false;
3142   if (GET_CODE (op) != REG)
3143     return false;
3144   if (REGNO (op) >= MEM0_REGNO)
3145     return false;
3146 
3147   offset = (1 << offset);
3148 
3149   for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
3150     if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
3151 	&& legal_subregs[i].regno == REGNO (op)
3152 	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
3153 	&& legal_subregs[i].byte_mask & offset)
3154       {
3155 	switch (legal_subregs[i].legal_when)
3156 	  {
3157 	  case 1:
3158 	    return false;
3159 	  case 16:
3160 	    if (TARGET_A16)
3161 	      return false;
3162 	    break;
3163 	  case 24:
3164 	    if (TARGET_A24)
3165 	      return false;
3166 	    break;
3167 	  }
3168       }
3169   return true;
3170 }
3171 
3172 /* Returns TRUE if we support a move between the first two operands.
3173    At the moment, we just want to discourage mem to mem moves until
3174    after reload, because reload has a hard time with our limited
3175    number of address registers, and we can get into a situation where
3176    we need three of them when we only have two.  */
3177 bool
3178 m32c_mov_ok (rtx * operands, machine_mode mode ATTRIBUTE_UNUSED)
3179 {
3180   rtx op0 = operands[0];
3181   rtx op1 = operands[1];
3182 
3183   if (TARGET_A24)
3184     return true;
3185 
3186 #define DEBUG_MOV_OK 0
3187 #if DEBUG_MOV_OK
3188   fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3189   debug_rtx (op0);
3190   debug_rtx (op1);
3191 #endif
3192 
3193   if (GET_CODE (op0) == SUBREG)
3194     op0 = XEXP (op0, 0);
3195   if (GET_CODE (op1) == SUBREG)
3196     op1 = XEXP (op1, 0);
3197 
3198   if (GET_CODE (op0) == MEM
3199       && GET_CODE (op1) == MEM
3200       && ! reload_completed)
3201     {
3202 #if DEBUG_MOV_OK
3203       fprintf (stderr, " - no, mem to mem\n");
3204 #endif
3205       return false;
3206     }
3207 
3208 #if DEBUG_MOV_OK
3209   fprintf (stderr, " - ok\n");
3210 #endif
3211   return true;
3212 }
3213 
3214 /* Returns TRUE if two consecutive HImode mov instructions, generated
3215    for moving an immediate double data to a double data type variable
3216    location, can be combined into single SImode mov instruction.  */
3217 bool
3218 m32c_immd_dbl_mov (rtx * operands ATTRIBUTE_UNUSED,
3219 		   machine_mode mode ATTRIBUTE_UNUSED)
3220 {
3221   /* ??? This relied on the now-defunct MEM_SCALAR and MEM_IN_STRUCT_P
3222      flags.  */
3223   return false;
3224 }
3225 
3226 /* Expanders */
3227 
3228 /* Subregs are non-orthogonal for us, because our registers are all
3229    different sizes.  */
static rtx
m32c_subreg (machine_mode outer,
	     rtx x, machine_mode inner, int byte)
{
  int r, nr = -1;

  /* Converting MEMs to different types that are the same size, we
     just rewrite them.  */
  if (GET_CODE (x) == SUBREG
      && SUBREG_BYTE (x) == 0
      && GET_CODE (SUBREG_REG (x)) == MEM
      && (GET_MODE_SIZE (GET_MODE (x))
	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
    {
      rtx oldx = x;
      x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
      MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
    }

  /* Push/pop get done as smaller push/pops.  */
  if (GET_CODE (x) == MEM
      && (GET_CODE (XEXP (x, 0)) == PRE_DEC
	  || GET_CODE (XEXP (x, 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (x, 0));
  if (GET_CODE (x) == SUBREG
      && GET_CODE (XEXP (x, 0)) == MEM
      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
    return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));

  /* Anything that is not a register: let the generic simplifier
     handle it.  */
  if (GET_CODE (x) != REG)
    {
      rtx r = simplify_gen_subreg (outer, x, inner, byte);
      if (GET_CODE (r) == SUBREG
	  && GET_CODE (x) == MEM
	  && MEM_VOLATILE_P (x))
	{
	  /* Volatile MEMs don't get simplified, but we need them to
	     be.  We are little endian, so the subreg byte is the
	     offset.  */
	  r = adjust_address_nv (x, outer, byte);
	}
      return r;
    }

  /* Pseudos, the arg pointer, and memory-backed pseudo-registers can
     all be handled generically too.  */
  r = REGNO (x);
  if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
    return simplify_gen_subreg (outer, x, inner, byte);

  if (IS_MEM_REGNO (r))
    return simplify_gen_subreg (outer, x, inner, byte);

  /* This is where the complexities of our register layout are
     described.  */
  if (byte == 0)
    nr = r;
  else if (outer == HImode)
    {
      /* Map HImode pieces of wider values onto the hard registers
	 that hold those bytes (see the legal_subregs table: SI/DI
	 values live in r2:r0, r3:r1, etc.).  */
      if (r == R0_REGNO && byte == 2)
	nr = R2_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
      else if (r == R0_REGNO && byte == 6)
	nr = R3_REGNO;
      else if (r == R1_REGNO && byte == 2)
	nr = R3_REGNO;
      else if (r == A0_REGNO && byte == 2)
	nr = A1_REGNO;
    }
  else if (outer == SImode)
    {
      /* NOTE(review): the byte == 0 case here is unreachable — a zero
	 byte offset was already handled above.  Harmless, but worth
	 confirming the intent.  */
      if (r == R0_REGNO && byte == 0)
	nr = R0_REGNO;
      else if (r == R0_REGNO && byte == 4)
	nr = R1_REGNO;
    }
  /* No valid mapping: this indicates a bug in the caller.  */
  if (nr == -1)
    {
      fprintf (stderr, "m32c_subreg %s %s %d\n",
	       mode_name[outer], mode_name[inner], byte);
      debug_rtx (x);
      gcc_unreachable ();
    }
  return gen_rtx_REG (outer, nr);
}
3315 
3316 /* Used to emit move instructions.  We split some moves,
3317    and avoid mem-mem moves.  */
3318 int
3319 m32c_prepare_move (rtx * operands, machine_mode mode)
3320 {
3321   if (far_addr_space_p (operands[0])
3322       && CONSTANT_P (operands[1]))
3323     {
3324       operands[1] = force_reg (GET_MODE (operands[0]), operands[1]);
3325     }
3326   if (TARGET_A16 && mode == PSImode)
3327     return m32c_split_move (operands, mode, 1);
3328   if ((GET_CODE (operands[0]) == MEM)
3329       && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3330     {
3331       rtx pmv = XEXP (operands[0], 0);
3332       rtx dest_reg = XEXP (pmv, 0);
3333       rtx dest_mod = XEXP (pmv, 1);
3334 
3335       emit_insn (gen_rtx_SET (dest_reg, dest_mod));
3336       operands[0] = gen_rtx_MEM (mode, dest_reg);
3337     }
3338   if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3339     operands[1] = copy_to_mode_reg (mode, operands[1]);
3340   return 0;
3341 }
3342 
3343 #define DEBUG_SPLIT 0
3344 
3345 /* Returns TRUE if the given PSImode move should be split.  We split
3346    for all r8c/m16c moves, since it doesn't support them, and for
3347    POP.L as we can only *push* SImode.  */
3348 int
3349 m32c_split_psi_p (rtx * operands)
3350 {
3351 #if DEBUG_SPLIT
3352   fprintf (stderr, "\nm32c_split_psi_p\n");
3353   debug_rtx (operands[0]);
3354   debug_rtx (operands[1]);
3355 #endif
3356   if (TARGET_A16)
3357     {
3358 #if DEBUG_SPLIT
3359       fprintf (stderr, "yes, A16\n");
3360 #endif
3361       return 1;
3362     }
3363   if (GET_CODE (operands[1]) == MEM
3364       && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3365     {
3366 #if DEBUG_SPLIT
3367       fprintf (stderr, "yes, pop.l\n");
3368 #endif
3369       return 1;
3370     }
3371 #if DEBUG_SPLIT
3372   fprintf (stderr, "no, default\n");
3373 #endif
3374   return 0;
3375 }
3376 
3377 /* Split the given move.  SPLIT_ALL is 0 if splitting is optional
3378    (define_expand), 1 if it is not optional (define_insn_and_split),
3379    and 3 for define_split (alternate api). */
int
m32c_split_move (rtx * operands, machine_mode mode, int split_all)
{
  rtx s[4], d[4];
  int parts, si, di, rev = 0;
  /* Operand pairs are stored starting at index 2, so ops[0..1] (the
     original destination/source) stay untouched when OPS aliases
     OPERANDS in the define_split case.  */
  int rv = 0, opi = 2;
  machine_mode submode = HImode;
  rtx *ops, local_ops[10];

  /* define_split modifies the existing operands, but the other two
     emit new insns.  OPS is where we store the operand pairs, which
     we emit later.  */
  if (split_all == 3)
    ops = operands;
  else
    ops = local_ops;

  /* Else HImode.  */
  if (mode == DImode)
    submode = SImode;

  /* Before splitting mem-mem moves, force one operand into a
     register.  */
  if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
    {
#if DEBUG0
      fprintf (stderr, "force_reg...\n");
      debug_rtx (operands[1]);
#endif
      operands[1] = force_reg (mode, operands[1]);
#if DEBUG0
      debug_rtx (operands[1]);
#endif
    }

  /* The move is always split into exactly two halves (HImode halves,
     or SImode halves for DImode).  */
  parts = 2;

#if DEBUG_SPLIT
  fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
	   split_all);
  debug_rtx (operands[0]);
  debug_rtx (operands[1]);
#endif

  /* Note that split_all is not used to select the api after this
     point, so it's safe to set it to 3 even with define_insn.  */
  /* None of the chips can move SI operands to sp-relative addresses,
     so we always split those.  */
  if (satisfies_constraint_Ss (operands[0]))
    split_all = 3;

  /* Far-address-space accesses must also be split on A16.  */
  if (TARGET_A16
      && (far_addr_space_p (operands[0])
	  || far_addr_space_p (operands[1])))
    split_all |= 1;

  /* We don't need to split these.  */
  if (TARGET_A24
      && split_all != 3
      && (mode == SImode || mode == PSImode)
      && !(GET_CODE (operands[1]) == MEM
	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
    return 0;

  /* First, enumerate the subregs we'll be dealing with.  */
  for (si = 0; si < parts; si++)
    {
      d[si] =
	m32c_subreg (submode, operands[0], mode,
		     si * GET_MODE_SIZE (submode));
      s[si] =
	m32c_subreg (submode, operands[1], mode,
		     si * GET_MODE_SIZE (submode));
    }

  /* Split pushes by emitting a sequence of smaller pushes.  The
     high part goes first so the value lands in memory in the right
     order.  */
  if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
    {
      for (si = parts - 1; si >= 0; si--)
	{
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_PRE_DEC (Pmode,
						     gen_rtx_REG (Pmode,
								  SP_REGNO)));
	  ops[opi++] = s[si];
	}

      rv = 1;
    }
  /* Likewise for pops.  */
  else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
    {
      for (di = 0; di < parts; di++)
	{
	  ops[opi++] = d[di];
	  ops[opi++] = gen_rtx_MEM (submode,
				    gen_rtx_POST_INC (Pmode,
						      gen_rtx_REG (Pmode,
								   SP_REGNO)));
	}
      rv = 1;
    }
  else if (split_all)
    {
      /* if d[di] == s[si] for any di < si, we'll early clobber. */
      for (di = 0; di < parts - 1; di++)
	for (si = di + 1; si < parts; si++)
	  if (reg_mentioned_p (d[di], s[si]))
	    rev = 1;

      /* Emit the halves in whichever order avoids clobbering a
	 source half before it is read.  */
      if (rev)
	for (si = 0; si < parts; si++)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      else
	for (si = parts - 1; si >= 0; si--)
	  {
	    ops[opi++] = d[si];
	    ops[opi++] = s[si];
	  }
      rv = 1;
    }
  /* Now emit any moves we may have accumulated.  For define_split
     (split_all == 3) the pairs are left in OPERANDS for the caller
     instead.  */
  if (rv && split_all != 3)
    {
      int i;
      for (i = 2; i < opi; i += 2)
	emit_move_insn (ops[i], ops[i + 1]);
    }
  return rv;
}
3513 
3514 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3515    the like.  For the R8C they expect one of the addresses to be in
3516    R1L:An so we need to arrange for that.  Otherwise, it's just a
3517    matter of picking out the operands we want and emitting the right
3518    pattern for them.  All these expanders, which correspond to
3519    patterns in blkmov.md, must return nonzero if they expand the insn,
3520    or zero if they should FAIL.  */
3521 
3522 /* This is a memset() opcode.  All operands are implied, so we need to
3523    arrange for them to be in the right registers.  The opcode wants
3524    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3525    the count (HI), and $2 the value (QI).  */
3526 int
3527 m32c_expand_setmemhi(rtx *operands)
3528 {
3529   rtx desta, count, val;
3530   rtx desto, counto;
3531 
3532   desta = XEXP (operands[0], 0);
3533   count = operands[1];
3534   val = operands[2];
3535 
3536   desto = gen_reg_rtx (Pmode);
3537   counto = gen_reg_rtx (HImode);
3538 
3539   if (GET_CODE (desta) != REG
3540       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3541     desta = copy_to_mode_reg (Pmode, desta);
3542 
3543   /* This looks like an arbitrary restriction, but this is by far the
3544      most common case.  For counts 8..14 this actually results in
3545      smaller code with no speed penalty because the half-sized
3546      constant can be loaded with a shorter opcode.  */
3547   if (GET_CODE (count) == CONST_INT
3548       && GET_CODE (val) == CONST_INT
3549       && ! (INTVAL (count) & 1)
3550       && (INTVAL (count) > 1)
3551       && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3552     {
3553       unsigned v = INTVAL (val) & 0xff;
3554       v = v | (v << 8);
3555       count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3556       val = copy_to_mode_reg (HImode, GEN_INT (v));
3557       if (TARGET_A16)
3558 	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3559       else
3560 	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3561       return 1;
3562     }
3563 
3564   /* This is the generalized memset() case.  */
3565   if (GET_CODE (val) != REG
3566       || REGNO (val) < FIRST_PSEUDO_REGISTER)
3567     val = copy_to_mode_reg (QImode, val);
3568 
3569   if (GET_CODE (count) != REG
3570       || REGNO (count) < FIRST_PSEUDO_REGISTER)
3571     count = copy_to_mode_reg (HImode, count);
3572 
3573   if (TARGET_A16)
3574     emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3575   else
3576     emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3577 
3578   return 1;
3579 }
3580 
3581 /* This is a memcpy() opcode.  All operands are implied, so we need to
3582    arrange for them to be in the right registers.  The opcode wants
3583    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3584    is the source (MEM:BLK), and $2 the count (HI).  */
int
m32c_expand_movmemhi(rtx *operands)
{
  rtx desta, srca, count;
  rtx desto, srco, counto;

  desta = XEXP (operands[0], 0);
  srca = XEXP (operands[1], 0);
  count = operands[2];

  /* Scratch pseudos handed to the pattern — presumably for the
     opcode's implied register results; verify against blkmov.md.  */
  desto = gen_reg_rtx (Pmode);
  srco = gen_reg_rtx (Pmode);
  counto = gen_reg_rtx (HImode);

  /* Both addresses must live in pseudos.  */
  if (GET_CODE (desta) != REG
      || REGNO (desta) < FIRST_PSEUDO_REGISTER)
    desta = copy_to_mode_reg (Pmode, desta);

  if (GET_CODE (srca) != REG
      || REGNO (srca) < FIRST_PSEUDO_REGISTER)
    srca = copy_to_mode_reg (Pmode, srca);

  /* Similar to setmem, but we don't need to check the value.  */
  if (GET_CODE (count) == CONST_INT
      && ! (INTVAL (count) & 1)
      && (INTVAL (count) > 1))
    {
      /* Even constant count: copy word-at-a-time with half the
	 count.  */
      count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
      if (TARGET_A16)
	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
      else
	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
      return 1;
    }

  /* This is the generalized memcpy() case: byte-wise copy.  */
  if (GET_CODE (count) != REG
      || REGNO (count) < FIRST_PSEUDO_REGISTER)
    count = copy_to_mode_reg (HImode, count);

  if (TARGET_A16)
    emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
  else
    emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));

  return 1;
}
3632 
3633 /* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
3634    the copy, which should point to the NUL at the end of the string,
3635    $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3636    Since our opcode leaves the destination pointing *after* the NUL,
3637    we must emit an adjustment.  */
3638 int
3639 m32c_expand_movstr(rtx *operands)
3640 {
3641   rtx desta, srca;
3642   rtx desto, srco;
3643 
3644   desta = XEXP (operands[1], 0);
3645   srca = XEXP (operands[2], 0);
3646 
3647   desto = gen_reg_rtx (Pmode);
3648   srco = gen_reg_rtx (Pmode);
3649 
3650   if (GET_CODE (desta) != REG
3651       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3652     desta = copy_to_mode_reg (Pmode, desta);
3653 
3654   if (GET_CODE (srca) != REG
3655       || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3656     srca = copy_to_mode_reg (Pmode, srca);
3657 
3658   emit_insn (gen_movstr_op (desto, srco, desta, srca));
3659   /* desto ends up being a1, which allows this type of add through MOVA.  */
3660   emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3661 
3662   return 1;
3663 }
3664 
3665 /* This is a strcmp() opcode.  $0 is the destination (HI) which holds
3666    <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3667    $2 is the other (MEM:BLK).  We must do the comparison, and then
3668    convert the flags to a signed integer result.  */
3669 int
3670 m32c_expand_cmpstr(rtx *operands)
3671 {
3672   rtx src1a, src2a;
3673 
3674   src1a = XEXP (operands[1], 0);
3675   src2a = XEXP (operands[2], 0);
3676 
3677   if (GET_CODE (src1a) != REG
3678       || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3679     src1a = copy_to_mode_reg (Pmode, src1a);
3680 
3681   if (GET_CODE (src2a) != REG
3682       || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3683     src2a = copy_to_mode_reg (Pmode, src2a);
3684 
3685   emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3686   emit_insn (gen_cond_to_int (operands[0]));
3687 
3688   return 1;
3689 }
3690 
3691 
3692 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3693 
3694 static shift_gen_func
3695 shift_gen_func_for (int mode, int code)
3696 {
3697 #define GFF(m,c,f) if (mode == m && code == c) return f
3698   GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
3699   GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
3700   GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
3701   GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
3702   GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
3703   GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
3704   GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
3705   GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3706   GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3707   GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3708   GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3709   GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3710 #undef GFF
3711   gcc_unreachable ();
3712 }
3713 
3714 /* The m32c only has one shift, but it takes a signed count.  GCC
3715    doesn't want this, so we fake it by negating any shift count when
3716    we're pretending to shift the other way.  Also, the shift count is
3717    limited to -8..8.  It's slightly better to use two shifts for 9..15
3718    than to load the count into r1h, so we do that too.  */
int
m32c_prepare_shift (rtx * operands, int scale, int shift_code)
{
  machine_mode mode = GET_MODE (operands[0]);
  shift_gen_func func = shift_gen_func_for (mode, shift_code);
  rtx temp;

  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* Constant count: chip away at it in maximum-sized chunks.
	 The per-insn limit is 8, except PSI/SI shifts on the m32c
	 which allow up to 32.  */
      int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
      int count = INTVAL (operands[2]) * scale;

      while (count > maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
	  operands[1] = temp;
	  count -= maxc;
	}
      while (count < -maxc)
	{
	  temp = gen_reg_rtx (mode);
	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
	  operands[1] = temp;
	  count += maxc;
	}
      emit_insn (func (operands[0], operands[1], GEN_INT (count)));
      return 1;
    }

  /* Variable count: canonicalize it into a QImode register.  */
  temp = gen_reg_rtx (QImode);
  if (scale < 0)
    /* The pattern has a NEG that corresponds to this. */
    emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
  else if (TARGET_A16 && mode == SImode)
    /* We do this because the code below may modify this, we don't
       want to modify the origin of this value.  */
    emit_move_insn (temp, operands[2]);
  else
    /* We'll only use it for the shift, no point emitting a move.  */
    temp = operands[2];

  if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
    {
      /* The m16c has a limit of -16..16 for SI shifts, even when the
	 shift count is in a register.  Since there are so many targets
	 of these shifts, it's better to expand the RTL here than to
	 call a helper function.

	 The resulting code looks something like this:

		cmp.b	r1h,-16
		jge.b	1f
		shl.l	-16,dest
		add.b	r1h,16
	1f:	cmp.b	r1h,16
		jle.b	1f
		shl.l	16,dest
		sub.b	r1h,16
	1f:	shl.l	r1h,dest

	 We take advantage of the fact that "negative" shifts are
	 undefined to skip one of the comparisons.  */

      rtx count;
      rtx tempvar;
      rtx_insn *insn;

      emit_move_insn (operands[0], operands[1]);

      count = temp;
      rtx_code_label *label = gen_label_rtx ();
      LABEL_NUSES (label) ++;

      tempvar = gen_reg_rtx (mode);

      if (shift_code == ASHIFT)
	{
	  /* This is a left shift.  We only need check positive counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
					  count, GEN_INT (16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
	  emit_label_after (label, insn);
	}
      else
	{
	  /* This is a right shift.  We only need check negative counts.  */
	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
					  count, GEN_INT (-16), label));
	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
	  emit_label_after (label, insn);
	}
      operands[1] = operands[0];
      emit_insn (func (operands[0], operands[0], count));
      return 1;
    }

  /* Returning 0 with operands[2] rewritten — presumably the caller's
     pattern performs the actual shift with the canonicalized count.  */
  operands[2] = temp;
  return 0;
}
3823 
3824 /* The m32c has a limited range of operations that work on PSImode
3825    values; we have to expand to SI, do the math, and truncate back to
3826    PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
3827    those cases.  */
3828 void
3829 m32c_expand_neg_mulpsi3 (rtx * operands)
3830 {
3831   /* operands: a = b * i */
3832   rtx temp1; /* b as SI */
3833   rtx scale /* i as SI */;
3834   rtx temp2; /* a*b as SI */
3835 
3836   temp1 = gen_reg_rtx (SImode);
3837   temp2 = gen_reg_rtx (SImode);
3838   if (GET_CODE (operands[2]) != CONST_INT)
3839     {
3840       scale = gen_reg_rtx (SImode);
3841       emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3842     }
3843   else
3844     scale = copy_to_mode_reg (SImode, operands[2]);
3845 
3846   emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3847   temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3848   emit_insn (gen_truncsipsi2 (operands[0], temp2));
3849 }
3850 
3851 /* Pattern Output Functions */
3852 
3853 int
3854 m32c_expand_movcc (rtx *operands)
3855 {
3856   rtx rel = operands[1];
3857 
3858   if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3859     return 1;
3860   if (GET_CODE (operands[2]) != CONST_INT
3861       || GET_CODE (operands[3]) != CONST_INT)
3862     return 1;
3863   if (GET_CODE (rel) == NE)
3864     {
3865       rtx tmp = operands[2];
3866       operands[2] = operands[3];
3867       operands[3] = tmp;
3868       rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3869     }
3870 
3871   emit_move_insn (operands[0],
3872 		  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3873 					rel,
3874 					operands[2],
3875 					operands[3]));
3876   return 0;
3877 }
3878 
3879 /* Used for the "insv" pattern.  Return nonzero to fail, else done.  */
int
m32c_expand_insv (rtx *operands)
{
  rtx op0, src0, p;
  int mask;

  /* Only single-bit insertions are supported.  */
  if (INTVAL (operands[1]) != 1)
    return 1;

  /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
  if (GET_CODE (operands[3]) != CONST_INT)
    return 1;
  if (INTVAL (operands[3]) != 0
      && INTVAL (operands[3]) != 1
      && INTVAL (operands[3]) != -1)
    return 1;

  /* Single bit at position operands[2].  */
  mask = 1 << INTVAL (operands[2]);

  /* A zero-offset SUBREG of a QI/HI register is just that register.  */
  op0 = operands[0];
  if (GET_CODE (op0) == SUBREG
      && SUBREG_BYTE (op0) == 0)
    {
      rtx sub = SUBREG_REG (op0);
      if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
	op0 = sub;
    }

  /* Read the current value through SRC0; use OP0 directly when a new
     pseudo cannot (or, for volatile MEMs, must not) be created.  */
  if (!can_create_pseudo_p ()
      || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
    src0 = op0;
  else
    {
      src0 = gen_reg_rtx (GET_MODE (op0));
      emit_move_insn (src0, op0);
    }

  if (GET_MODE (op0) == HImode
      && INTVAL (operands[2]) >= 8
      && GET_CODE (op0) == MEM)
    {
      /* We are little endian.  */
      /* NOTE(review): NEW_MEM is computed here but never used — OP0
	 is not replaced by it, so only the mask shift below takes
	 effect.  This looks like dead or incomplete code; confirm
	 the intent before relying on this path.  */
      rtx new_mem = gen_rtx_MEM (QImode, plus_constant (Pmode,
							XEXP (op0, 0), 1));
      MEM_COPY_ATTRIBUTES (new_mem, op0);
      mask >>= 8;
    }

  /* First, we generate a mask with the correct polarity.  If we are
     storing a zero, we want an AND mask, so invert it.  */
  if (INTVAL (operands[3]) == 0)
    {
      /* Storing a zero, use an AND mask */
      if (GET_MODE (op0) == HImode)
	mask ^= 0xffff;
      else
	mask ^= 0xff;
    }
  /* Now we need to properly sign-extend the mask in case we need to
     fall back to an AND or OR opcode.  */
  if (GET_MODE (op0) == HImode)
    {
      if (mask & 0x8000)
	mask -= 0x10000;
    }
  else
    {
      if (mask & 0x80)
	mask -= 0x100;
    }

  /* Select the insn by inserted value (IOR vs AND), operand size
     (HI vs QI), and address-size variant (24 vs 16).  */
  switch (  (INTVAL (operands[3]) ? 4 : 0)
	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
	  + (TARGET_A24 ? 1 : 0))
    {
    case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
    case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
    case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
    case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
    case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
    default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
    }

  emit_insn (p);
  return 0;
}
3969 
3970 const char *
3971 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3972 {
3973   static char buf[30];
3974   if (GET_CODE (operands[0]) == REG
3975       && REGNO (operands[0]) == R0_REGNO)
3976     {
3977       if (code == EQ)
3978 	return "stzx\t#1,#0,r0l";
3979       if (code == NE)
3980 	return "stzx\t#0,#1,r0l";
3981     }
3982   sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3983   return buf;
3984 }
3985 
3986 /* Encode symbol attributes of a SYMBOL_REF into its
3987    SYMBOL_REF_FLAGS. */
3988 static void
3989 m32c_encode_section_info (tree decl, rtx rtl, int first)
3990 {
3991   int extra_flags = 0;
3992 
3993   default_encode_section_info (decl, rtl, first);
3994   if (TREE_CODE (decl) == FUNCTION_DECL
3995       && m32c_special_page_vector_p (decl))
3996 
3997     extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
3998 
3999   if (extra_flags)
4000     SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
4001 }
4002 
4003 /* Returns TRUE if the current function is a leaf, and thus we can
4004    determine which registers an interrupt function really needs to
4005    save.  The logic below is mostly about finding the insn sequence
4006    that's the function, versus any sequence that might be open for the
4007    current insn.  */
4008 static int
4009 m32c_leaf_function_p (void)
4010 {
4011   int rv;
4012 
4013   push_topmost_sequence ();
4014   rv = leaf_function_p ();
4015   pop_topmost_sequence ();
4016   return rv;
4017 }
4018 
4019 /* Returns TRUE if the current function needs to use the ENTER/EXIT
4020    opcodes.  If the function doesn't need the frame base or stack
4021    pointer, it can use the simpler RTS opcode.  */
4022 static bool
4023 m32c_function_needs_enter (void)
4024 {
4025   rtx_insn *insn;
4026   rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
4027   rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
4028 
4029   for (insn = get_topmost_sequence ()->first; insn; insn = NEXT_INSN (insn))
4030     if (NONDEBUG_INSN_P (insn))
4031       {
4032 	if (reg_mentioned_p (sp, insn))
4033 	  return true;
4034 	if (reg_mentioned_p (fb, insn))
4035 	  return true;
4036       }
4037   return false;
4038 }
4039 
4040 /* Mark all the subexpressions of the PARALLEL rtx PAR as
4041    frame-related.  Return PAR.
4042 
4043    dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
4044    PARALLEL rtx other than the first if they do not have the
4045    FRAME_RELATED flag set on them.  So this function is handy for
4046    marking up 'enter' instructions.  */
4047 static rtx
4048 m32c_all_frame_related (rtx par)
4049 {
4050   int len = XVECLEN (par, 0);
4051   int i;
4052 
4053   for (i = 0; i < len; i++)
4054     F (XVECEXP (par, 0, i));
4055 
4056   return par;
4057 }
4058 
4059 /* Emits the prologue.  See the frame layout comment earlier in this
4060    file.  We can reserve up to 256 bytes with the ENTER opcode, beyond
4061    that we manually update sp.  */
void
m32c_emit_prologue (void)
{
  int frame_size, extra_frame_size = 0, reg_save_size;
  int complex_prologue = 0;

  cfun->machine->is_leaf = m32c_leaf_function_p ();
  if (interrupt_p (cfun->decl))
    {
      cfun->machine->is_interrupt = 1;
      complex_prologue = 1;
    }
  else if (bank_switch_p (cfun->decl))
    warning (OPT_Wattributes,
	     "%<bank_switch%> has no effect on non-interrupt functions");

  reg_save_size = m32c_pushm_popm (PP_justcount);

  /* Interrupt handlers either switch register banks (FSET B) or push
     the registers they clobber with PUSHM.  */
  if (interrupt_p (cfun->decl))
    {
      if (bank_switch_p (cfun->decl))
	emit_insn (gen_fset_b ());
      else if (cfun->machine->intr_pushm)
	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
    }

  /* A function with no frame that never touches sp/fb can return
     with a plain RTS instead of EXITD.  */
  frame_size =
    m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
  if (frame_size == 0
      && !m32c_function_needs_enter ())
    cfun->machine->use_rts = 1;

  if (flag_stack_usage_info)
    current_function_static_stack_size = frame_size;

  /* ENTER only reserves up to 254 bytes here; anything beyond that is
     allocated with an explicit sp adjustment below.  */
  if (frame_size > 254)
    {
      extra_frame_size = frame_size - 254;
      frame_size = 254;
    }
  if (cfun->machine->use_rts == 0)
    F (emit_insn (m32c_all_frame_related
		  (TARGET_A16
		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));

  if (extra_frame_size)
    {
      complex_prologue = 1;
      if (TARGET_A16)
	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
				  gen_rtx_REG (HImode, SP_REGNO),
				  GEN_INT (-extra_frame_size))));
      else
	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
				   gen_rtx_REG (PSImode, SP_REGNO),
				   GEN_INT (-extra_frame_size))));
    }

  /* Save the call-saved registers this function uses.  */
  complex_prologue += m32c_pushm_popm (PP_pushm);

  /* This just emits a comment into the .s file for debugging.  */
  if (complex_prologue)
    emit_insn (gen_prologue_end ());
}
4127 
/* Likewise, for the epilogue.  The only exception is that, for
   interrupts, we must manually unwind the frame as the REIT opcode
   doesn't do that.  */
void
m32c_emit_epilogue (void)
{
  /* Bytes of registers the prologue pushed; zero means nothing to pop.  */
  int popm_count = m32c_pushm_popm (PP_justcount);

  /* This just emits a comment into the .s file for debugging.  */
  if (popm_count > 0 || cfun->machine->is_interrupt)
    emit_insn (gen_epilogue_start ());

  if (popm_count > 0)
    m32c_pushm_popm (PP_popm);

  if (cfun->machine->is_interrupt)
    {
      machine_mode spmode = TARGET_A16 ? HImode : PSImode;

      /* REIT clears B flag and restores $fp for us, but we still
	 have to fix up the stack.  USE_RTS just means we didn't
	 emit ENTER.  */
      if (!cfun->machine->use_rts)
	{
	  /* Copy FP to SP by way of A0 (presumably no direct FP->SP
	     move pattern exists — confirm), then pop the saved FP.  */
	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
			  gen_rtx_REG (spmode, FP_REGNO));
	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
			  gen_rtx_REG (spmode, A0_REGNO));
	  /* We can't just add this to the POPM because it would be in
	     the wrong order, and wouldn't fix the stack if we're bank
	     switching.  */
	  if (TARGET_A16)
	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
	  else
	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
	}
      /* Undo the interrupt prologue's PUSHM; skipped when bank_switch
	 was used (no registers were pushed then).  */
      if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));

      /* The FREIT (Fast REturn from InTerrupt) instruction should be
         generated only for M32C/M32CM targets (generate the REIT
         instruction otherwise).  */
      if (fast_interrupt_p (cfun->decl))
        {
          /* Check if fast_attribute is set for M32C or M32CM.  */
          if (TARGET_A24)
            {
              emit_jump_insn (gen_epilogue_freit ());
            }
          /* If fast_interrupt attribute is set for an R8C or M16C
             target ignore this attribute and generated REIT
             instruction.  */
          else
	    {
	      warning (OPT_Wattributes,
		       "%<fast_interrupt%> attribute directive ignored");
	      emit_jump_insn (gen_epilogue_reit_16 ());
	    }
        }
      else if (TARGET_A16)
	emit_jump_insn (gen_epilogue_reit_16 ());
      else
	emit_jump_insn (gen_epilogue_reit_24 ());
    }
  else if (cfun->machine->use_rts)
    emit_jump_insn (gen_epilogue_rts ());
  else if (TARGET_A16)
    emit_jump_insn (gen_epilogue_exitd_16 ());
  else
    emit_jump_insn (gen_epilogue_exitd_24 ());
}
4199 
/* Emit the epilogue used when returning to an exception handler.
   RET_ADDR is the register holding the address to return to; the
   stack adjustment was stashed in cfun->machine->eh_stack_adjust.
   All the real work is done by a libgcc assembler routine.  */
void
m32c_emit_eh_epilogue (rtx ret_addr)
{
  /* R0[R2] has the stack adjustment.  R1[R3] has the address to
     return to.  We have to fudge the stack, pop everything, pop SP
     (fudged), and return (fudged).  This is actually easier to do in
     assembler, so punt to libgcc.  */
  emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
  /*  emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
}
4210 
4211 /* Indicate which flags must be properly set for a given conditional.  */
4212 static int
4213 flags_needed_for_conditional (rtx cond)
4214 {
4215   switch (GET_CODE (cond))
4216     {
4217     case LE:
4218     case GT:
4219       return FLAGS_OSZ;
4220     case LEU:
4221     case GTU:
4222       return FLAGS_ZC;
4223     case LT:
4224     case GE:
4225       return FLAGS_OS;
4226     case LTU:
4227     case GEU:
4228       return FLAGS_C;
4229     case EQ:
4230     case NE:
4231       return FLAGS_Z;
4232     default:
4233       return FLAGS_N;
4234     }
4235 }
4236 
4237 #define DEBUG_CMP 0
4238 
4239 /* Returns true if a compare insn is redundant because it would only
4240    set flags that are already set correctly.  */
4241 static bool
4242 m32c_compare_redundant (rtx_insn *cmp, rtx *operands)
4243 {
4244   int flags_needed;
4245   int pflags;
4246   rtx_insn *prev;
4247   rtx pp, next;
4248   rtx op0, op1;
4249 #if DEBUG_CMP
4250   int prev_icode, i;
4251 #endif
4252 
4253   op0 = operands[0];
4254   op1 = operands[1];
4255 
4256 #if DEBUG_CMP
4257   fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4258   debug_rtx(cmp);
4259   for (i=0; i<2; i++)
4260     {
4261       fprintf(stderr, "operands[%d] = ", i);
4262       debug_rtx(operands[i]);
4263     }
4264 #endif
4265 
4266   next = next_nonnote_insn (cmp);
4267   if (!next || !INSN_P (next))
4268     {
4269 #if DEBUG_CMP
4270       fprintf(stderr, "compare not followed by insn\n");
4271       debug_rtx(next);
4272 #endif
4273       return false;
4274     }
4275   if (GET_CODE (PATTERN (next)) == SET
4276       && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4277     {
4278       next = XEXP (XEXP (PATTERN (next), 1), 0);
4279     }
4280   else if (GET_CODE (PATTERN (next)) == SET)
4281     {
4282       /* If this is a conditional, flags_needed will be something
4283 	 other than FLAGS_N, which we test below.  */
4284       next = XEXP (PATTERN (next), 1);
4285     }
4286   else
4287     {
4288 #if DEBUG_CMP
4289       fprintf(stderr, "compare not followed by conditional\n");
4290       debug_rtx(next);
4291 #endif
4292       return false;
4293     }
4294 #if DEBUG_CMP
4295   fprintf(stderr, "conditional is: ");
4296   debug_rtx(next);
4297 #endif
4298 
4299   flags_needed = flags_needed_for_conditional (next);
4300   if (flags_needed == FLAGS_N)
4301     {
4302 #if DEBUG_CMP
4303       fprintf(stderr, "compare not followed by conditional\n");
4304       debug_rtx(next);
4305 #endif
4306       return false;
4307     }
4308 
4309   /* Compare doesn't set overflow and carry the same way that
4310      arithmetic instructions do, so we can't replace those.  */
4311   if (flags_needed & FLAGS_OC)
4312     return false;
4313 
4314   prev = cmp;
4315   do {
4316     prev = prev_nonnote_insn (prev);
4317     if (!prev)
4318       {
4319 #if DEBUG_CMP
4320 	fprintf(stderr, "No previous insn.\n");
4321 #endif
4322 	return false;
4323       }
4324     if (!INSN_P (prev))
4325       {
4326 #if DEBUG_CMP
4327 	fprintf(stderr, "Previous insn is a non-insn.\n");
4328 #endif
4329 	return false;
4330       }
4331     pp = PATTERN (prev);
4332     if (GET_CODE (pp) != SET)
4333       {
4334 #if DEBUG_CMP
4335 	fprintf(stderr, "Previous insn is not a SET.\n");
4336 #endif
4337 	return false;
4338       }
4339     pflags = get_attr_flags (prev);
4340 
4341     /* Looking up attributes of previous insns corrupted the recog
4342        tables.  */
4343     INSN_UID (cmp) = -1;
4344     recog (PATTERN (cmp), cmp, 0);
4345 
4346     if (pflags == FLAGS_N
4347 	&& reg_mentioned_p (op0, pp))
4348       {
4349 #if DEBUG_CMP
4350 	fprintf(stderr, "intermediate non-flags insn uses op:\n");
4351 	debug_rtx(prev);
4352 #endif
4353 	return false;
4354       }
4355 
4356     /* Check for comparisons against memory - between volatiles and
4357        aliases, we just can't risk this one.  */
4358     if (GET_CODE (operands[0]) == MEM
4359 	|| GET_CODE (operands[0]) == MEM)
4360       {
4361 #if DEBUG_CMP
4362 	fprintf(stderr, "comparisons with memory:\n");
4363 	debug_rtx(prev);
4364 #endif
4365 	return false;
4366       }
4367 
4368     /* Check for PREV changing a register that's used to compute a
4369        value in CMP, even if it doesn't otherwise change flags.  */
4370     if (GET_CODE (operands[0]) == REG
4371 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4372       {
4373 #if DEBUG_CMP
4374 	fprintf(stderr, "sub-value affected, op0:\n");
4375 	debug_rtx(prev);
4376 #endif
4377 	return false;
4378       }
4379     if (GET_CODE (operands[1]) == REG
4380 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4381       {
4382 #if DEBUG_CMP
4383 	fprintf(stderr, "sub-value affected, op1:\n");
4384 	debug_rtx(prev);
4385 #endif
4386 	return false;
4387       }
4388 
4389   } while (pflags == FLAGS_N);
4390 #if DEBUG_CMP
4391   fprintf(stderr, "previous flag-setting insn:\n");
4392   debug_rtx(prev);
4393   debug_rtx(pp);
4394 #endif
4395 
4396   if (GET_CODE (pp) == SET
4397       && GET_CODE (XEXP (pp, 0)) == REG
4398       && REGNO (XEXP (pp, 0)) == FLG_REGNO
4399       && GET_CODE (XEXP (pp, 1)) == COMPARE)
4400     {
4401       /* Adjacent cbranches must have the same operands to be
4402 	 redundant.  */
4403       rtx pop0 = XEXP (XEXP (pp, 1), 0);
4404       rtx pop1 = XEXP (XEXP (pp, 1), 1);
4405 #if DEBUG_CMP
4406       fprintf(stderr, "adjacent cbranches\n");
4407       debug_rtx(pop0);
4408       debug_rtx(pop1);
4409 #endif
4410       if (rtx_equal_p (op0, pop0)
4411 	  && rtx_equal_p (op1, pop1))
4412 	return true;
4413 #if DEBUG_CMP
4414       fprintf(stderr, "prev cmp not same\n");
4415 #endif
4416       return false;
4417     }
4418 
4419   /* Else the previous insn must be a SET, with either the source or
4420      dest equal to operands[0], and operands[1] must be zero.  */
4421 
4422   if (!rtx_equal_p (op1, const0_rtx))
4423     {
4424 #if DEBUG_CMP
4425       fprintf(stderr, "operands[1] not const0_rtx\n");
4426 #endif
4427       return false;
4428     }
4429   if (GET_CODE (pp) != SET)
4430     {
4431 #if DEBUG_CMP
4432       fprintf (stderr, "pp not set\n");
4433 #endif
4434       return false;
4435     }
4436   if (!rtx_equal_p (op0, SET_SRC (pp))
4437       && !rtx_equal_p (op0, SET_DEST (pp)))
4438     {
4439 #if DEBUG_CMP
4440       fprintf(stderr, "operands[0] not found in set\n");
4441 #endif
4442       return false;
4443     }
4444 
4445 #if DEBUG_CMP
4446   fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4447 #endif
4448   if ((pflags & flags_needed) == flags_needed)
4449     return true;
4450 
4451   return false;
4452 }
4453 
4454 /* Return the pattern for a compare.  This will be commented out if
4455    the compare is redundant, else a normal pattern is returned.  Thus,
4456    the assembler output says where the compare would have been.  */
4457 char *
4458 m32c_output_compare (rtx_insn *insn, rtx *operands)
4459 {
4460   static char templ[] = ";cmp.b\t%1,%0";
4461   /*                             ^ 5  */
4462 
4463   templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4464   if (m32c_compare_redundant (insn, operands))
4465     {
4466 #if DEBUG_CMP
4467       fprintf(stderr, "cbranch: cmp not needed\n");
4468 #endif
4469       return templ;
4470     }
4471 
4472 #if DEBUG_CMP
4473   fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4474 #endif
4475   return templ + 1;
4476 }
4477 
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info

/* If the frame pointer isn't used, we detect it manually.  But the
   stack pointer doesn't have as flexible addressing as the frame
   pointer, so we always assume we have it.  */

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true

/* The Global `targetm' Variable. */

struct gcc_target targetm = TARGET_INITIALIZER;

/* Garbage-collection root tables for this file, generated by gengtype.  */
#include "gt-m32c.h"
4493