xref: /netbsd-src/external/gpl3/gcc.old/dist/gcc/config/m32c/m32c.c (revision b7b7574d3bf8eeb51a1fa3977b59142ec6434a55)
1 /* Target Code for R8C/M16C/M32C
2    Copyright (C) 2005, 2006, 2007, 2008, 2009
3    Free Software Foundation, Inc.
4    Contributed by Red Hat.
5 
6    This file is part of GCC.
7 
8    GCC is free software; you can redistribute it and/or modify it
9    under the terms of the GNU General Public License as published
10    by the Free Software Foundation; either version 3, or (at your
11    option) any later version.
12 
13    GCC is distributed in the hope that it will be useful, but WITHOUT
14    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15    or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
16    License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with GCC; see the file COPYING3.  If not see
20    <http://www.gnu.org/licenses/>.  */
21 
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "real.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
33 #include "output.h"
34 #include "insn-attr.h"
35 #include "flags.h"
36 #include "recog.h"
37 #include "reload.h"
38 #include "toplev.h"
39 #include "obstack.h"
40 #include "tree.h"
41 #include "expr.h"
42 #include "optabs.h"
43 #include "except.h"
44 #include "function.h"
45 #include "ggc.h"
46 #include "target.h"
47 #include "target-def.h"
48 #include "tm_p.h"
49 #include "langhooks.h"
50 #include "gimple.h"
51 #include "df.h"
52 
53 /* Prototypes */
54 
55 /* Used by m32c_pushm_popm.  */
56 typedef enum
57 {
58   PP_pushm,
59   PP_popm,
60   PP_justcount
61 } Push_Pop_Type;
62 
63 static bool m32c_function_needs_enter (void);
64 static tree interrupt_handler (tree *, tree, tree, int, bool *);
65 static tree function_vector_handler (tree *, tree, tree, int, bool *);
66 static int interrupt_p (tree node);
67 static int bank_switch_p (tree node);
68 static int fast_interrupt_p (tree node);
69 static int interrupt_p (tree node);
70 static bool m32c_asm_integer (rtx, unsigned int, int);
71 static int m32c_comp_type_attributes (const_tree, const_tree);
72 static bool m32c_fixed_condition_code_regs (unsigned int *, unsigned int *);
73 static struct machine_function *m32c_init_machine_status (void);
74 static void m32c_insert_attributes (tree, tree *);
75 static bool m32c_legitimate_address_p (enum machine_mode, rtx, bool);
76 static bool m32c_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
77 				    const_tree, bool);
78 static bool m32c_promote_prototypes (const_tree);
79 static int m32c_pushm_popm (Push_Pop_Type);
80 static bool m32c_strict_argument_naming (CUMULATIVE_ARGS *);
81 static rtx m32c_struct_value_rtx (tree, int);
82 static rtx m32c_subreg (enum machine_mode, rtx, enum machine_mode, int);
83 static int need_to_save (int);
84 static rtx m32c_function_value (const_tree, const_tree, bool);
85 static rtx m32c_libcall_value (enum machine_mode, const_rtx);
86 
87 int current_function_special_page_vector (rtx);
88 
89 #define SYMBOL_FLAG_FUNCVEC_FUNCTION    (SYMBOL_FLAG_MACH_DEP << 0)
90 
91 #define streq(a,b) (strcmp ((a), (b)) == 0)
92 
93 /* Internal support routines */
94 
95 /* Debugging statements are tagged with DEBUG0 only so that they can
96    be easily enabled individually, by replacing the '0' with '1' as
97    needed.  */
98 #define DEBUG0 0
99 #define DEBUG1 1
100 
101 #if DEBUG0
102 /* This is needed by some of the commented-out debug statements
103    below.  */
104 static char const *class_names[LIM_REG_CLASSES] = REG_CLASS_NAMES;
105 #endif
106 static int class_contents[LIM_REG_CLASSES][1] = REG_CLASS_CONTENTS;
107 
108 /* These are all to support encode_pattern().  */
109 static char pattern[30], *patternp;
110 static GTY(()) rtx patternr[30];
111 #define RTX_IS(x) (streq (pattern, x))
112 
113 /* Some macros to simplify the logic throughout this file.  */
114 #define IS_MEM_REGNO(regno) ((regno) >= MEM0_REGNO && (regno) <= MEM7_REGNO)
115 #define IS_MEM_REG(rtx) (GET_CODE (rtx) == REG && IS_MEM_REGNO (REGNO (rtx)))
116 
117 #define IS_CR_REGNO(regno) ((regno) >= SB_REGNO && (regno) <= PC_REGNO)
118 #define IS_CR_REG(rtx) (GET_CODE (rtx) == REG && IS_CR_REGNO (REGNO (rtx)))
119 
120 /* We do most RTX matching by converting the RTX into a string, and
121    using string compares.  This vastly simplifies the logic in many of
122    the functions in this file.
123 
124    On exit, pattern[] has the encoded string (use RTX_IS("...") to
125    compare it) and patternr[] has pointers to the nodes in the RTX
126    corresponding to each character in the encoded string.  The latter
127    is mostly used by print_operand().
128 
129    Unrecognized patterns have '?' in them; this shows up when the
130    assembler complains about syntax errors.
131 */
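/* For example, (mem (plus (reg) (const_int 4))) encodes as "m+ri";
   patternr[0] points at the MEM, patternr[1] at the PLUS, patternr[2]
   at the REG and patternr[3] at the CONST_INT, the same positions
   those characters occupy in the encoded string.  */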
132 
133 static void
134 encode_pattern_1 (rtx x)
135 {
136   int i;
137 
138   if (patternp == pattern + sizeof (pattern) - 2)
139     {
140       patternp[-1] = '?';
141       return;
142     }
143 
144   patternr[patternp - pattern] = x;
145 
146   switch (GET_CODE (x))
147     {
148     case REG:
149       *patternp++ = 'r';
150       break;
151     case SUBREG:
152       if (GET_MODE_SIZE (GET_MODE (x)) !=
153 	  GET_MODE_SIZE (GET_MODE (XEXP (x, 0))))
154 	*patternp++ = 'S';
155       encode_pattern_1 (XEXP (x, 0));
156       break;
157     case MEM:
158       *patternp++ = 'm';
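      /* Fall through to encode the MEM's address.  */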
159     case CONST:
160       encode_pattern_1 (XEXP (x, 0));
161       break;
162     case PLUS:
163       *patternp++ = '+';
164       encode_pattern_1 (XEXP (x, 0));
165       encode_pattern_1 (XEXP (x, 1));
166       break;
167     case PRE_DEC:
168       *patternp++ = '>';
169       encode_pattern_1 (XEXP (x, 0));
170       break;
171     case POST_INC:
172       *patternp++ = '<';
173       encode_pattern_1 (XEXP (x, 0));
174       break;
175     case LO_SUM:
176       *patternp++ = 'L';
177       encode_pattern_1 (XEXP (x, 0));
178       encode_pattern_1 (XEXP (x, 1));
179       break;
180     case HIGH:
181       *patternp++ = 'H';
182       encode_pattern_1 (XEXP (x, 0));
183       break;
184     case SYMBOL_REF:
185       *patternp++ = 's';
186       break;
187     case LABEL_REF:
188       *patternp++ = 'l';
189       break;
190     case CODE_LABEL:
191       *patternp++ = 'c';
192       break;
193     case CONST_INT:
194     case CONST_DOUBLE:
195       *patternp++ = 'i';
196       break;
197     case UNSPEC:
198       *patternp++ = 'u';
199       *patternp++ = '0' + XCINT (x, 1, UNSPEC);
200       for (i = 0; i < XVECLEN (x, 0); i++)
201 	encode_pattern_1 (XVECEXP (x, 0, i));
202       break;
203     case USE:
204       *patternp++ = 'U';
205       break;
206     case PARALLEL:
207       *patternp++ = '|';
208       for (i = 0; i < XVECLEN (x, 0); i++)
209 	encode_pattern_1 (XVECEXP (x, 0, i));
210       break;
211     case EXPR_LIST:
212       *patternp++ = 'E';
213       encode_pattern_1 (XEXP (x, 0));
214       if (XEXP (x, 1))
215 	encode_pattern_1 (XEXP (x, 1));
216       break;
217     default:
218       *patternp++ = '?';
219 #if DEBUG0
220       fprintf (stderr, "can't encode pattern %s\n",
221 	       GET_RTX_NAME (GET_CODE (x)));
222       debug_rtx (x);
223       gcc_unreachable ();
224 #endif
225       break;
226     }
227 }
228 
229 static void
230 encode_pattern (rtx x)
231 {
232   patternp = pattern;
233   encode_pattern_1 (x);
234   *patternp = 0;
235 }
236 
237 /* Since register names indicate the mode they're used in, we need a
238    way to determine which name to refer to the register with.  Called
239    by print_operand().  */
240 
241 static const char *
242 reg_name_with_mode (int regno, enum machine_mode mode)
243 {
244   int mlen = GET_MODE_SIZE (mode);
245   if (regno == R0_REGNO && mlen == 1)
246     return "r0l";
247   if (regno == R0_REGNO && (mlen == 3 || mlen == 4))
248     return "r2r0";
249   if (regno == R0_REGNO && mlen == 6)
250     return "r2r1r0";
251   if (regno == R0_REGNO && mlen == 8)
252     return "r3r1r2r0";
253   if (regno == R1_REGNO && mlen == 1)
254     return "r1l";
255   if (regno == R1_REGNO && (mlen == 3 || mlen == 4))
256     return "r3r1";
257   if (regno == A0_REGNO && TARGET_A16 && (mlen == 3 || mlen == 4))
258     return "a1a0";
259   return reg_names[regno];
260 }
261 
262 /* How many bytes a register uses on stack when it's pushed.  We need
263    to know this because the push opcode needs to explicitly indicate
264    the size of the register, even though the name of the register
265    already tells it that.  Used by m32c_output_reg_{push,pop}, which
266    is only used through calls to ASM_OUTPUT_REG_{PUSH,POP}.  */
267 
268 static int
269 reg_push_size (int regno)
270 {
271   switch (regno)
272     {
273     case R0_REGNO:
274     case R1_REGNO:
275       return 2;
276     case R2_REGNO:
277     case R3_REGNO:
278     case FLG_REGNO:
279       return 2;
280     case A0_REGNO:
281     case A1_REGNO:
282     case SB_REGNO:
283     case FB_REGNO:
284     case SP_REGNO:
285       if (TARGET_A16)
286 	return 2;
287       else
288 	return 3;
289     default:
290       gcc_unreachable ();
291     }
292 }
293 
294 static int *class_sizes = 0;
295 
296 /* Given two register classes, find the largest register class that
297    is contained in their intersection.  If there is no such class,
298    return RETURNED_IF_EMPTY instead.  */
299 static int
300 reduce_class (int original_class, int limiting_class, int returned_if_empty)
301 {
302   int cc = class_contents[original_class][0];
303   int i, best = NO_REGS;
304   int best_size = 0;
305 
306   if (original_class == limiting_class)
307     return original_class;
308 
309   if (!class_sizes)
310     {
311       int r;
312       class_sizes = (int *) xmalloc (LIM_REG_CLASSES * sizeof (int));
313       for (i = 0; i < LIM_REG_CLASSES; i++)
314 	{
315 	  class_sizes[i] = 0;
316 	  for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
317 	    if (class_contents[i][0] & (1 << r))
318 	      class_sizes[i]++;
319 	}
320     }
321 
322   cc &= class_contents[limiting_class][0];
323   for (i = 0; i < LIM_REG_CLASSES; i++)
324     {
325       int ic = class_contents[i][0];
326 
327       if ((~cc & ic) == 0)
328 	if (best_size < class_sizes[i])
329 	  {
330 	    best = i;
331 	    best_size = class_sizes[i];
332 	  }
333 
334     }
335   if (best == NO_REGS)
336     return returned_if_empty;
337   return best;
338 }
339 
340 /* Returns TRUE if there are any registers that exist in both register
341    classes.  */
342 static int
343 classes_intersect (int class1, int class2)
344 {
345   return class_contents[class1][0] & class_contents[class2][0];
346 }
347 
348 /* Used by m32c_register_move_cost to determine if a move is
349    impossibly expensive.  */
350 static int
351 class_can_hold_mode (int rclass, enum machine_mode mode)
352 {
353   /* Cache the results:  0=untested  1=no  2=yes */
354   static char results[LIM_REG_CLASSES][MAX_MACHINE_MODE];
355   if (results[rclass][mode] == 0)
356     {
357       int r, n, i;
358       results[rclass][mode] = 1;
359       for (r = 0; r < FIRST_PSEUDO_REGISTER; r++)
360 	if (class_contents[rclass][0] & (1 << r)
361 	    && HARD_REGNO_MODE_OK (r, mode))
362 	  {
363 	    int ok = 1;
364 	    n = HARD_REGNO_NREGS (r, mode);
365 	    for (i = 1; i < n; i++)
366 	      if (!(class_contents[rclass][0] & (1 << (r + i))))
367 		ok = 0;
368 	    if (ok)
369 	      {
370 		results[rclass][mode] = 2;
371 		break;
372 	      }
373 	  }
374     }
375 #if DEBUG0
376   fprintf (stderr, "class %s can hold %s? %s\n",
377 	   class_names[rclass], mode_name[mode],
378 	   (results[rclass][mode] == 2) ? "yes" : "no");
379 #endif
380   return results[rclass][mode] == 2;
381 }
382 
383 /* Run-time Target Specification.  */
384 
385 /* Memregs are memory locations that gcc treats like general
386    registers, as there are a limited number of true registers and the
387    m32c families can use memory in most places that registers can be
388    used.
389 
390    However, since memory accesses are more expensive than registers,
391    we allow the user to limit the number of memregs available, in
392    order to try to persuade gcc to try harder to use real registers.
393 
394    Memregs are provided by m32c-lib1.S.
395 */
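/* For example, "-memregs=8" leaves four 16-bit memreg words (mem0
   through mem3) usable; m32c_conditional_register_usage below marks
   the rest as fixed.  */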
396 
397 int target_memregs = 16;
398 static bool target_memregs_set = FALSE;
399 int ok_to_change_target_memregs = TRUE;
400 
401 #undef  TARGET_HANDLE_OPTION
402 #define TARGET_HANDLE_OPTION m32c_handle_option
403 static bool
404 m32c_handle_option (size_t code,
405 		    const char *arg ATTRIBUTE_UNUSED,
406 		    int value ATTRIBUTE_UNUSED)
407 {
408   if (code == OPT_memregs_)
409     {
410       target_memregs_set = TRUE;
411       target_memregs = atoi (arg);
412     }
413   return TRUE;
414 }
415 
416 /* Implements OVERRIDE_OPTIONS.  We limit memregs to 0..16, and
417    provide a default.  */
418 void
419 m32c_override_options (void)
420 {
421   if (target_memregs_set)
422     {
423       if (target_memregs < 0 || target_memregs > 16)
424 	error ("invalid target memregs value '%d'", target_memregs);
425     }
426   else
427     target_memregs = 16;
428 
429   if (TARGET_A24)
430     flag_ivopts = 0;
431 }
432 
433 /* Defining data structures for per-function information */
434 
435 /* The usual; we set up our machine_function data.  */
436 static struct machine_function *
437 m32c_init_machine_status (void)
438 {
439   struct machine_function *machine;
440   machine =
441     (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
442 
443   return machine;
444 }
445 
446 /* Implements INIT_EXPANDERS.  We just set up to call the above
447    function.  */
448 void
449 m32c_init_expanders (void)
450 {
451   init_machine_status = m32c_init_machine_status;
452 }
453 
454 /* Storage Layout */
455 
456 /* Register Basics */
457 
458 /* Basic Characteristics of Registers */
459 
460 /* Whether a mode fits in a register is complex enough to warrant a
461    table.  */
462 static struct
463 {
464   char qi_regs;
465   char hi_regs;
466   char pi_regs;
467   char si_regs;
468   char di_regs;
469 } nregs_table[FIRST_PSEUDO_REGISTER] =
470 {
471   { 1, 1, 2, 2, 4 },		/* r0 */
472   { 0, 1, 0, 0, 0 },		/* r2 */
473   { 1, 1, 2, 2, 0 },		/* r1 */
474   { 0, 1, 0, 0, 0 },		/* r3 */
475   { 0, 1, 1, 0, 0 },		/* a0 */
476   { 0, 1, 1, 0, 0 },		/* a1 */
477   { 0, 1, 1, 0, 0 },		/* sb */
478   { 0, 1, 1, 0, 0 },		/* fb */
479   { 0, 1, 1, 0, 0 },		/* sp */
480   { 1, 1, 1, 0, 0 },		/* pc */
481   { 0, 0, 0, 0, 0 },		/* fl */
482   { 1, 1, 1, 0, 0 },		/* ap */
483   { 1, 1, 2, 2, 4 },		/* mem0 */
484   { 1, 1, 2, 2, 4 },		/* mem1 */
485   { 1, 1, 2, 2, 4 },		/* mem2 */
486   { 1, 1, 2, 2, 4 },		/* mem3 */
487   { 1, 1, 2, 2, 4 },		/* mem4 */
488   { 1, 1, 2, 2, 0 },		/* mem5 */
489   { 1, 1, 2, 2, 0 },		/* mem6 */
490   { 1, 1, 0, 0, 0 },		/* mem7 */
491 };
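/* Reading the table: each entry is the number of hard registers a
   value of that width occupies when it starts in that register, with
   0 meaning the value cannot start there at all.  For example, an
   SImode value starting at r0 takes two registers (r2r0) and a DImode
   value takes four (r3r1r2r0).  */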
492 
493 /* Implements CONDITIONAL_REGISTER_USAGE.  We adjust the number of
494    available memregs, and select which registers need to be preserved
495    across calls based on the chip family.  */
496 
497 void
498 m32c_conditional_register_usage (void)
499 {
500   int i;
501 
502   if (0 <= target_memregs && target_memregs <= 16)
503     {
504       /* The command line option is bytes, but our "registers" are
505 	 16-bit words.  */
506       for (i = (target_memregs+1)/2; i < 8; i++)
507 	{
508 	  fixed_regs[MEM0_REGNO + i] = 1;
509 	  CLEAR_HARD_REG_BIT (reg_class_contents[MEM_REGS], MEM0_REGNO + i);
510 	}
511     }
512 
513   /* M32CM and M32C preserve more registers across function calls.  */
514   if (TARGET_A24)
515     {
516       call_used_regs[R1_REGNO] = 0;
517       call_used_regs[R2_REGNO] = 0;
518       call_used_regs[R3_REGNO] = 0;
519       call_used_regs[A0_REGNO] = 0;
520       call_used_regs[A1_REGNO] = 0;
521     }
522 }
523 
524 /* How Values Fit in Registers */
525 
526 /* Implements HARD_REGNO_NREGS.  This is complicated by the fact that
527    different registers are different sizes from each other, *and* may
528    be different sizes in different chip families.  */
529 static int
530 m32c_hard_regno_nregs_1 (int regno, enum machine_mode mode)
531 {
532   if (regno == FLG_REGNO && mode == CCmode)
533     return 1;
534   if (regno >= FIRST_PSEUDO_REGISTER)
535     return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
536 
537   if (regno >= MEM0_REGNO && regno <= MEM7_REGNO)
538     return (GET_MODE_SIZE (mode) + 1) / 2;
539 
540   if (GET_MODE_SIZE (mode) <= 1)
541     return nregs_table[regno].qi_regs;
542   if (GET_MODE_SIZE (mode) <= 2)
543     return nregs_table[regno].hi_regs;
544   if (regno == A0_REGNO && mode == PSImode && TARGET_A16)
545     return 2;
546   if ((GET_MODE_SIZE (mode) <= 3 || mode == PSImode) && TARGET_A24)
547     return nregs_table[regno].pi_regs;
548   if (GET_MODE_SIZE (mode) <= 4)
549     return nregs_table[regno].si_regs;
550   if (GET_MODE_SIZE (mode) <= 8)
551     return nregs_table[regno].di_regs;
552   return 0;
553 }
554 
555 int
556 m32c_hard_regno_nregs (int regno, enum machine_mode mode)
557 {
558   int rv = m32c_hard_regno_nregs_1 (regno, mode);
559   return rv ? rv : 1;
560 }
561 
562 /* Implements HARD_REGNO_MODE_OK.  The above function does the work
563    already; just test its return value.  */
564 int
565 m32c_hard_regno_ok (int regno, enum machine_mode mode)
566 {
567   return m32c_hard_regno_nregs_1 (regno, mode) != 0;
568 }
569 
570 /* Implements MODES_TIEABLE_P.  In general, modes aren't tieable since
571    registers are all different sizes.  However, since most modes are
572    bigger than our registers anyway, it's easier to implement this
573    function that way, leaving QImode as the only unique case.  */
574 int
575 m32c_modes_tieable_p (enum machine_mode m1, enum machine_mode m2)
576 {
577   if (GET_MODE_SIZE (m1) == GET_MODE_SIZE (m2))
578     return 1;
579 
580 #if 0
581   if (m1 == QImode || m2 == QImode)
582     return 0;
583 #endif
584 
585   return 1;
586 }
587 
588 /* Register Classes */
589 
590 /* Implements REGNO_REG_CLASS.  */
591 enum reg_class
592 m32c_regno_reg_class (int regno)
593 {
594   switch (regno)
595     {
596     case R0_REGNO:
597       return R0_REGS;
598     case R1_REGNO:
599       return R1_REGS;
600     case R2_REGNO:
601       return R2_REGS;
602     case R3_REGNO:
603       return R3_REGS;
604     case A0_REGNO:
605     case A1_REGNO:
606       return A_REGS;
607     case SB_REGNO:
608       return SB_REGS;
609     case FB_REGNO:
610       return FB_REGS;
611     case SP_REGNO:
612       return SP_REGS;
613     case FLG_REGNO:
614       return FLG_REGS;
615     default:
616       if (IS_MEM_REGNO (regno))
617 	return MEM_REGS;
618       return ALL_REGS;
619     }
620 }
621 
622 /* Implements REG_CLASS_FROM_CONSTRAINT.  Note that some constraints only match
623    for certain chip families.  */
624 int
625 m32c_reg_class_from_constraint (char c ATTRIBUTE_UNUSED, const char *s)
626 {
627   if (memcmp (s, "Rsp", 3) == 0)
628     return SP_REGS;
629   if (memcmp (s, "Rfb", 3) == 0)
630     return FB_REGS;
631   if (memcmp (s, "Rsb", 3) == 0)
632     return SB_REGS;
633   if (memcmp (s, "Rcr", 3) == 0)
634     return TARGET_A16 ? CR_REGS : NO_REGS;
635   if (memcmp (s, "Rcl", 3) == 0)
636     return TARGET_A24 ? CR_REGS : NO_REGS;
637   if (memcmp (s, "R0w", 3) == 0)
638     return R0_REGS;
639   if (memcmp (s, "R1w", 3) == 0)
640     return R1_REGS;
641   if (memcmp (s, "R2w", 3) == 0)
642     return R2_REGS;
643   if (memcmp (s, "R3w", 3) == 0)
644     return R3_REGS;
645   if (memcmp (s, "R02", 3) == 0)
646     return R02_REGS;
647   if (memcmp (s, "R13", 3) == 0)
648     return R13_REGS;
649   if (memcmp (s, "R03", 3) == 0)
650     return R03_REGS;
651   if (memcmp (s, "Rdi", 3) == 0)
652     return DI_REGS;
653   if (memcmp (s, "Rhl", 3) == 0)
654     return HL_REGS;
655   if (memcmp (s, "R23", 3) == 0)
656     return R23_REGS;
657   if (memcmp (s, "Ra0", 3) == 0)
658     return A0_REGS;
659   if (memcmp (s, "Ra1", 3) == 0)
660     return A1_REGS;
661   if (memcmp (s, "Raa", 3) == 0)
662     return A_REGS;
663   if (memcmp (s, "Raw", 3) == 0)
664     return TARGET_A16 ? A_REGS : NO_REGS;
665   if (memcmp (s, "Ral", 3) == 0)
666     return TARGET_A24 ? A_REGS : NO_REGS;
667   if (memcmp (s, "Rqi", 3) == 0)
668     return QI_REGS;
669   if (memcmp (s, "Rad", 3) == 0)
670     return AD_REGS;
671   if (memcmp (s, "Rsi", 3) == 0)
672     return SI_REGS;
673   if (memcmp (s, "Rhi", 3) == 0)
674     return HI_REGS;
675   if (memcmp (s, "Rhc", 3) == 0)
676     return HC_REGS;
677   if (memcmp (s, "Rra", 3) == 0)
678     return RA_REGS;
679   if (memcmp (s, "Rfl", 3) == 0)
680     return FLG_REGS;
681   if (memcmp (s, "Rmm", 3) == 0)
682     {
683       if (fixed_regs[MEM0_REGNO])
684 	return NO_REGS;
685       return MEM_REGS;
686     }
687 
688   /* PSImode registers - i.e. whatever can hold a pointer.  */
689   if (memcmp (s, "Rpi", 3) == 0)
690     {
691       if (TARGET_A16)
692 	return HI_REGS;
693       else
694 	return RA_REGS; /* r2r0 and r3r1 can hold pointers.  */
695     }
696 
697   /* We handle this one as an EXTRA_CONSTRAINT.  */
698   if (memcmp (s, "Rpa", 3) == 0)
699     return NO_REGS;
700 
701   if (*s == 'R')
702     {
703       fprintf(stderr, "unrecognized R constraint: %.3s\n", s);
704       gcc_unreachable();
705     }
706 
707   return NO_REGS;
708 }
709 
710 /* Implements REGNO_OK_FOR_BASE_P.  */
711 int
712 m32c_regno_ok_for_base_p (int regno)
713 {
714   if (regno == A0_REGNO
715       || regno == A1_REGNO || regno >= FIRST_PSEUDO_REGISTER)
716     return 1;
717   return 0;
718 }
719 
720 #define DEBUG_RELOAD 0
721 
722 /* Implements PREFERRED_RELOAD_CLASS.  In general, prefer general
723    registers of the appropriate size.  */
724 int
725 m32c_preferred_reload_class (rtx x, int rclass)
726 {
727   int newclass = rclass;
728 
729 #if DEBUG_RELOAD
730   fprintf (stderr, "\npreferred_reload_class for %s is ",
731 	   class_names[rclass]);
732 #endif
733   if (rclass == NO_REGS)
734     rclass = GET_MODE (x) == QImode ? HL_REGS : R03_REGS;
735 
736   if (classes_intersect (rclass, CR_REGS))
737     {
738       switch (GET_MODE (x))
739 	{
740 	case QImode:
741 	  newclass = HL_REGS;
742 	  break;
743 	default:
744 	  /*      newclass = HI_REGS; */
745 	  break;
746 	}
747     }
748 
749   else if (newclass == QI_REGS && GET_MODE_SIZE (GET_MODE (x)) > 2)
750     newclass = SI_REGS;
751   else if (GET_MODE_SIZE (GET_MODE (x)) > 4
752 	   && ~class_contents[rclass][0] & 0x000f)
753     newclass = DI_REGS;
754 
755   rclass = reduce_class (rclass, newclass, rclass);
756 
757   if (GET_MODE (x) == QImode)
758     rclass = reduce_class (rclass, HL_REGS, rclass);
759 
760 #if DEBUG_RELOAD
761   fprintf (stderr, "%s\n", class_names[rclass]);
762   debug_rtx (x);
763 
764   if (GET_CODE (x) == MEM
765       && GET_CODE (XEXP (x, 0)) == PLUS
766       && GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS)
767     fprintf (stderr, "Glorm!\n");
768 #endif
769   return rclass;
770 }
771 
772 /* Implements PREFERRED_OUTPUT_RELOAD_CLASS.  */
773 int
774 m32c_preferred_output_reload_class (rtx x, int rclass)
775 {
776   return m32c_preferred_reload_class (x, rclass);
777 }
778 
779 /* Implements LIMIT_RELOAD_CLASS.  We basically want to avoid using
780    the address registers for ordinary data reloads, since they are
781    needed for address reloads.  */
782 int
783 m32c_limit_reload_class (enum machine_mode mode, int rclass)
784 {
785 #if DEBUG_RELOAD
786   fprintf (stderr, "limit_reload_class for %s: %s ->",
787 	   mode_name[mode], class_names[rclass]);
788 #endif
789 
790   if (mode == QImode)
791     rclass = reduce_class (rclass, HL_REGS, rclass);
792   else if (mode == HImode)
793     rclass = reduce_class (rclass, HI_REGS, rclass);
794   else if (mode == SImode)
795     rclass = reduce_class (rclass, SI_REGS, rclass);
796 
797   if (rclass != A_REGS)
798     rclass = reduce_class (rclass, DI_REGS, rclass);
799 
800 #if DEBUG_RELOAD
801   fprintf (stderr, " %s\n", class_names[rclass]);
802 #endif
803   return rclass;
804 }
805 
806 /* Implements SECONDARY_RELOAD_CLASS.  QImode values have to be
807    reloaded in r0 or r1, as those are the only real QImode registers.
808    CR regs get reloaded through appropriately sized general or address
809    registers.  */
810 int
811 m32c_secondary_reload_class (int rclass, enum machine_mode mode, rtx x)
812 {
813   int cc = class_contents[rclass][0];
814 #if DEBUG0
815   fprintf (stderr, "\nsecondary reload class %s %s\n",
816 	   class_names[rclass], mode_name[mode]);
817   debug_rtx (x);
818 #endif
819   if (mode == QImode
820       && GET_CODE (x) == MEM && (cc & ~class_contents[R23_REGS][0]) == 0)
821     return QI_REGS;
822   if (classes_intersect (rclass, CR_REGS)
823       && GET_CODE (x) == REG
824       && REGNO (x) >= SB_REGNO && REGNO (x) <= SP_REGNO)
825     return TARGET_A16 ? HI_REGS : A_REGS;
826   return NO_REGS;
827 }
828 
829 /* Implements CLASS_LIKELY_SPILLED_P.  A_REGS is needed for address
830    reloads.  */
831 int
832 m32c_class_likely_spilled_p (int regclass)
833 {
834   if (regclass == A_REGS)
835     return 1;
836   return reg_class_size[regclass] == 1;
837 }
838 
839 /* Implements CLASS_MAX_NREGS.  We calculate this according to its
840    documented meaning, to avoid potential inconsistencies with actual
841    class definitions.  */
842 int
843 m32c_class_max_nregs (int regclass, enum machine_mode mode)
844 {
845   int rn, max = 0;
846 
847   for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
848     if (class_contents[regclass][0] & (1 << rn))
849       {
850 	int n = m32c_hard_regno_nregs (rn, mode);
851 	if (max < n)
852 	  max = n;
853       }
854   return max;
855 }
856 
857 /* Implements CANNOT_CHANGE_MODE_CLASS.  Only r0 and r1 can change to
858    QI (r0l, r1l) because the chip doesn't support QI ops on other
859    registers (well, it does on a0/a1 but if we let gcc do that, reload
860    suffers).  Otherwise, we allow changes to larger modes.  */
861 int
862 m32c_cannot_change_mode_class (enum machine_mode from,
863 			       enum machine_mode to, int rclass)
864 {
865   int rn;
866 #if DEBUG0
867   fprintf (stderr, "cannot change from %s to %s in %s\n",
868 	   mode_name[from], mode_name[to], class_names[rclass]);
869 #endif
870 
871   /* If the larger mode isn't allowed in any of these registers, we
872      can't allow the change.  */
873   for (rn = 0; rn < FIRST_PSEUDO_REGISTER; rn++)
874     if (class_contents[rclass][0] & (1 << rn))
875       if (! m32c_hard_regno_ok (rn, to))
876 	return 1;
877 
878   if (to == QImode)
879     return (class_contents[rclass][0] & 0x1ffa);
880 
881   if (class_contents[rclass][0] & 0x0005	/* r0, r1 */
882       && GET_MODE_SIZE (from) > 1)
883     return 0;
884   if (GET_MODE_SIZE (from) > 2)	/* all other regs */
885     return 0;
886 
887   return 1;
888 }
889 
890 /* Helpers for the rest of the file.  */
891 /* TRUE if the rtx is a REG rtx for the given register.  */
892 #define IS_REG(rtx,regno) (GET_CODE (rtx) == REG \
893 			   && REGNO (rtx) == regno)
894 /* TRUE if the rtx is a pseudo - specifically, one we can use as a
895    base register in address calculations (hence the "strict"
896    argument).  */
897 #define IS_PSEUDO(rtx,strict) (!strict && GET_CODE (rtx) == REG \
898 			       && (REGNO (rtx) == AP_REGNO \
899 				   || REGNO (rtx) >= FIRST_PSEUDO_REGISTER))
900 
901 /* Implements CONST_OK_FOR_CONSTRAINT_P.  Currently, all constant
902    constraints start with 'I', with the next two characters indicating
903    the type and size of the range allowed.  */
904 int
905 m32c_const_ok_for_constraint_p (HOST_WIDE_INT value,
906 				char c ATTRIBUTE_UNUSED, const char *str)
907 {
908   /* s=signed u=unsigned n=nonzero m=minus l=log2able,
909      [sun] bits [SUN] bytes, p=pointer size
910      I[-0-9][0-9] matches that number */
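  /* For example, "Is3" accepts -8..7, "IU2" accepts 0..65535, and
     "Ilb" accepts any single-bit (power-of-two) value that fits in a
     byte.  */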
911   if (memcmp (str, "Is3", 3) == 0)
912     {
913       return (-8 <= value && value <= 7);
914     }
915   if (memcmp (str, "IS1", 3) == 0)
916     {
917       return (-128 <= value && value <= 127);
918     }
919   if (memcmp (str, "IS2", 3) == 0)
920     {
921       return (-32768 <= value && value <= 32767);
922     }
923   if (memcmp (str, "IU2", 3) == 0)
924     {
925       return (0 <= value && value <= 65535);
926     }
927   if (memcmp (str, "IU3", 3) == 0)
928     {
929       return (0 <= value && value <= 0x00ffffff);
930     }
931   if (memcmp (str, "In4", 3) == 0)
932     {
933       return (-8 <= value && value && value <= 8);
934     }
935   if (memcmp (str, "In5", 3) == 0)
936     {
937       return (-16 <= value && value && value <= 16);
938     }
939   if (memcmp (str, "In6", 3) == 0)
940     {
941       return (-32 <= value && value && value <= 32);
942     }
943   if (memcmp (str, "IM2", 3) == 0)
944     {
945       return (-65536 <= value && value && value <= -1);
946     }
947   if (memcmp (str, "Ilb", 3) == 0)
948     {
949       int b = exact_log2 (value);
950       return (b >= 0 && b <= 7);
951     }
952   if (memcmp (str, "Imb", 3) == 0)
953     {
954       int b = exact_log2 ((value ^ 0xff) & 0xff);
955       return (b >= 0 && b <= 7);
956     }
957   if (memcmp (str, "ImB", 3) == 0)
958     {
959       int b = exact_log2 ((value ^ 0xffff) & 0xffff);
960       return (b >= 0 && b <= 7);
961     }
962   if (memcmp (str, "Ilw", 3) == 0)
963     {
964       int b = exact_log2 (value);
965       return (b >= 0 && b <= 15);
966     }
967   if (memcmp (str, "Imw", 3) == 0)
968     {
969       int b = exact_log2 ((value ^ 0xffff) & 0xffff);
970       return (b >= 0 && b <= 15);
971     }
972   if (memcmp (str, "I00", 3) == 0)
973     {
974       return (value == 0);
975     }
976   return 0;
977 }
978 
979 /* Implements EXTRA_CONSTRAINT_STR (see next function too).  'S' is
980    for memory constraints, plus "Rpa" for PARALLEL rtx's we use for
981    call return values.  */
982 int
983 m32c_extra_constraint_p2 (rtx value, char c ATTRIBUTE_UNUSED, const char *str)
984 {
985   encode_pattern (value);
986   if (memcmp (str, "Sd", 2) == 0)
987     {
988       /* This is the common "src/dest" address */
989       rtx r;
990       if (GET_CODE (value) == MEM && CONSTANT_P (XEXP (value, 0)))
991 	return 1;
992       if (RTX_IS ("ms") || RTX_IS ("m+si"))
993 	return 1;
994       if (RTX_IS ("m++rii"))
995 	{
996 	  if (REGNO (patternr[3]) == FB_REGNO
997 	      && INTVAL (patternr[4]) == 0)
998 	    return 1;
999 	}
1000       if (RTX_IS ("mr"))
1001 	r = patternr[1];
1002       else if (RTX_IS ("m+ri") || RTX_IS ("m+rs") || RTX_IS ("m+r+si"))
1003 	r = patternr[2];
1004       else
1005 	return 0;
1006       if (REGNO (r) == SP_REGNO)
1007 	return 0;
1008       return m32c_legitimate_address_p (GET_MODE (value), XEXP (value, 0), 1);
1009     }
1010   else if (memcmp (str, "Sa", 2) == 0)
1011     {
1012       rtx r;
1013       if (RTX_IS ("mr"))
1014 	r = patternr[1];
1015       else if (RTX_IS ("m+ri"))
1016 	r = patternr[2];
1017       else
1018 	return 0;
1019       return (IS_REG (r, A0_REGNO) || IS_REG (r, A1_REGNO));
1020     }
1021   else if (memcmp (str, "Si", 2) == 0)
1022     {
1023       return (RTX_IS ("mi") || RTX_IS ("ms") || RTX_IS ("m+si"));
1024     }
1025   else if (memcmp (str, "Ss", 2) == 0)
1026     {
1027       return ((RTX_IS ("mr")
1028 	       && (IS_REG (patternr[1], SP_REGNO)))
1029 	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SP_REGNO))));
1030     }
1031   else if (memcmp (str, "Sf", 2) == 0)
1032     {
1033       return ((RTX_IS ("mr")
1034 	       && (IS_REG (patternr[1], FB_REGNO)))
1035 	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], FB_REGNO))));
1036     }
1037   else if (memcmp (str, "Sb", 2) == 0)
1038     {
1039       return ((RTX_IS ("mr")
1040 	       && (IS_REG (patternr[1], SB_REGNO)))
1041 	      || (RTX_IS ("m+ri") && (IS_REG (patternr[2], SB_REGNO))));
1042     }
1043   else if (memcmp (str, "Sp", 2) == 0)
1044     {
1045       /* Absolute addresses 0..0x1fff used for bit addressing (I/O ports) */
1046       return (RTX_IS ("mi")
1047 	      && !(INTVAL (patternr[1]) & ~0x1fff));
1048     }
1049   else if (memcmp (str, "S1", 2) == 0)
1050     {
1051       return r1h_operand (value, QImode);
1052     }
1053 
1054   gcc_assert (str[0] != 'S');
1055 
1056   if (memcmp (str, "Rpa", 2) == 0)
1057     return GET_CODE (value) == PARALLEL;
1058 
1059   return 0;
1060 }
1061 
1062 /* This is for when we're debugging the above.  */
1063 int
1064 m32c_extra_constraint_p (rtx value, char c, const char *str)
1065 {
1066   int rv = m32c_extra_constraint_p2 (value, c, str);
1067 #if DEBUG0
1068   fprintf (stderr, "\nconstraint %.*s: %d\n", CONSTRAINT_LEN (c, str), str,
1069 	   rv);
1070   debug_rtx (value);
1071 #endif
1072   return rv;
1073 }
1074 
1075 /* Implements EXTRA_MEMORY_CONSTRAINT.  Currently, we only use strings
1076    starting with 'S'.  */
1077 int
1078 m32c_extra_memory_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1079 {
1080   return c == 'S';
1081 }
1082 
1083 /* Implements EXTRA_ADDRESS_CONSTRAINT.  We reserve 'A' strings for these,
1084    but don't currently define any.  */
1085 int
1086 m32c_extra_address_constraint (char c, const char *str ATTRIBUTE_UNUSED)
1087 {
1088   return c == 'A';
1089 }
1090 
1091 /* STACK AND CALLING */
1092 
1093 /* Frame Layout */
1094 
1095 /* Implements RETURN_ADDR_RTX.  Note that R8C and M16C push 24 bits
1096    (yes, THREE bytes) onto the stack for the return address, but we
1097    don't support pointers bigger than 16 bits on those chips.  This
1098    will likely wreak havoc with exception unwinding.  FIXME.  */
1099 rtx
1100 m32c_return_addr_rtx (int count)
1101 {
1102   enum machine_mode mode;
1103   int offset;
1104   rtx ra_mem;
1105 
1106   if (count)
1107     return NULL_RTX;
1108   /* we want 2[$fb] */
1109 
1110   if (TARGET_A24)
1111     {
1112       /* It's four bytes */
1113       mode = PSImode;
1114       offset = 4;
1115     }
1116   else
1117     {
1118       /* FIXME: it's really 3 bytes */
1119       mode = HImode;
1120       offset = 2;
1121     }
1122 
1123   ra_mem =
1124     gen_rtx_MEM (mode, plus_constant (gen_rtx_REG (Pmode, FP_REGNO), offset));
1125   return copy_to_mode_reg (mode, ra_mem);
1126 }
1127 
1128 /* Implements INCOMING_RETURN_ADDR_RTX.  See comment above.  */
1129 rtx
1130 m32c_incoming_return_addr_rtx (void)
1131 {
1132   /* we want [sp] */
1133   return gen_rtx_MEM (PSImode, gen_rtx_REG (PSImode, SP_REGNO));
1134 }
1135 
1136 /* Exception Handling Support */
1137 
1138 /* Implements EH_RETURN_DATA_REGNO.  Choose registers able to hold
1139    pointers.  */
1140 int
1141 m32c_eh_return_data_regno (int n)
1142 {
1143   switch (n)
1144     {
1145     case 0:
1146       return A0_REGNO;
1147     case 1:
1148       if (TARGET_A16)
1149 	return R3_REGNO;
1150       else
1151 	return R1_REGNO;
1152     default:
1153       return INVALID_REGNUM;
1154     }
1155 }
1156 
1157 /* Implements EH_RETURN_STACKADJ_RTX.  Saved and used later in
1158    m32c_emit_eh_epilogue.  */
1159 rtx
1160 m32c_eh_return_stackadj_rtx (void)
1161 {
1162   if (!cfun->machine->eh_stack_adjust)
1163     {
1164       rtx sa;
1165 
1166       sa = gen_rtx_REG (Pmode, R0_REGNO);
1167       cfun->machine->eh_stack_adjust = sa;
1168     }
1169   return cfun->machine->eh_stack_adjust;
1170 }
1171 
1172 /* Registers That Address the Stack Frame */
1173 
1174 /* Implements DWARF_FRAME_REGNUM and DBX_REGISTER_NUMBER.  Note that
1175    the original spec called for dwarf numbers to vary with register
1176    width as well, for example, r0l, r0, and r2r0 would each have
1177    different dwarf numbers.  GCC doesn't support this, and we don't do
1178    it, and gdb seems to like it this way anyway.  */
1179 unsigned int
1180 m32c_dwarf_frame_regnum (int n)
1181 {
1182   switch (n)
1183     {
1184     case R0_REGNO:
1185       return 5;
1186     case R1_REGNO:
1187       return 6;
1188     case R2_REGNO:
1189       return 7;
1190     case R3_REGNO:
1191       return 8;
1192     case A0_REGNO:
1193       return 9;
1194     case A1_REGNO:
1195       return 10;
1196     case FB_REGNO:
1197       return 11;
1198     case SB_REGNO:
1199       return 19;
1200 
1201     case SP_REGNO:
1202       return 12;
1203     case PC_REGNO:
1204       return 13;
1205     default:
1206       return DWARF_FRAME_REGISTERS + 1;
1207     }
1208 }
1209 
1210 /* The frame looks like this:
1211 
1212    ap -> +------------------------------
1213          | Return address (3 or 4 bytes)
1214 	 | Saved FB (2 or 4 bytes)
1215    fb -> +------------------------------
1216 	 | local vars
1217          | register saves fb
1218 	 |        through r0 as needed
1219    sp -> +------------------------------
1220 */
1221 
1222 /* We use this to wrap all emitted insns in the prologue.  */
1223 static rtx
1224 F (rtx x)
1225 {
1226   RTX_FRAME_RELATED_P (x) = 1;
1227   return x;
1228 }
1229 
1230 /* This maps register numbers to the PUSHM/POPM bitfield, and tells us
1231    how much the stack pointer moves for each, for each cpu family.  */
1232 static struct
1233 {
1234   int reg1;
1235   int bit;
1236   int a16_bytes;
1237   int a24_bytes;
1238 } pushm_info[] =
1239 {
1240   /* These are in reverse push (nearest-to-sp) order.  */
1241   { R0_REGNO, 0x80, 2, 2 },
1242   { R1_REGNO, 0x40, 2, 2 },
1243   { R2_REGNO, 0x20, 2, 2 },
1244   { R3_REGNO, 0x10, 2, 2 },
1245   { A0_REGNO, 0x08, 2, 4 },
1246   { A1_REGNO, 0x04, 2, 4 },
1247   { SB_REGNO, 0x02, 2, 4 },
1248   { FB_REGNO, 0x01, 2, 4 }
1249 };
1250 
1251 #define PUSHM_N (sizeof(pushm_info)/sizeof(pushm_info[0]))
1252 
1253 /* Returns TRUE if we need to save/restore the given register.  We
1254    save everything for exception handlers, so that any register can be
1255    unwound.  For interrupt handlers, we save everything if the handler
1256    calls something else (because we don't know what *that* function
1257    might do), but try to be a bit smarter if the handler is a leaf
1258    function.  We always save $a0, though, because we use that in the
1259    epilogue to copy $fb to $sp.  */
1260 static int
1261 need_to_save (int regno)
1262 {
1263   if (fixed_regs[regno])
1264     return 0;
1265   if (crtl->calls_eh_return)
1266     return 1;
1267   if (regno == FP_REGNO)
1268     return 0;
1269   if (cfun->machine->is_interrupt
1270       && (!cfun->machine->is_leaf
1271 	  || (regno == A0_REGNO
1272 	      && m32c_function_needs_enter ())
1273 	  ))
1274     return 1;
1275   if (df_regs_ever_live_p (regno)
1276       && (!call_used_regs[regno] || cfun->machine->is_interrupt))
1277     return 1;
1278   return 0;
1279 }
1280 
1281 /* This function contains all the intelligence about saving and
1282    restoring registers.  It always figures out the register save set.
1283    When called with PP_justcount, it merely returns the size of the
1284    save set (for eliminating the frame pointer, for example).  When
1285    called with PP_pushm or PP_popm, it emits the appropriate
1286    instructions for saving (pushm) or restoring (popm) the
1287    registers.  */
1288 static int
1289 m32c_pushm_popm (Push_Pop_Type ppt)
1290 {
1291   int reg_mask = 0;
1292   int byte_count = 0, bytes;
1293   int i;
1294   rtx dwarf_set[PUSHM_N];
1295   int n_dwarfs = 0;
1296   int nosave_mask = 0;
1297 
1298   if (crtl->return_rtx
1299       && GET_CODE (crtl->return_rtx) == PARALLEL
1300       && !(crtl->calls_eh_return || cfun->machine->is_interrupt))
1301     {
1302       rtx exp = XVECEXP (crtl->return_rtx, 0, 0);
1303       rtx rv = XEXP (exp, 0);
1304       int rv_bytes = GET_MODE_SIZE (GET_MODE (rv));
1305 
1306       if (rv_bytes > 2)
1307 	nosave_mask |= 0x20;	/* PSI, SI */
1308       else
1309 	nosave_mask |= 0xf0;	/* DF */
1310       if (rv_bytes > 4)
1311 	nosave_mask |= 0x50;	/* DI */
1312     }
1313 
1314   for (i = 0; i < (int) PUSHM_N; i++)
1315     {
1316       /* Skip if this register doesn't need saving.  */
1317       if (!need_to_save (pushm_info[i].reg1))
1318 	continue;
1319 
1320       if (pushm_info[i].bit & nosave_mask)
1321 	continue;
1322 
1323       reg_mask |= pushm_info[i].bit;
1324       bytes = TARGET_A16 ? pushm_info[i].a16_bytes : pushm_info[i].a24_bytes;
1325 
1326       if (ppt == PP_pushm)
1327 	{
1328 	  enum machine_mode mode = (bytes == 2) ? HImode : SImode;
1329 	  rtx addr;
1330 
1331 	  /* Always use stack_pointer_rtx instead of calling
1332 	     gen_rtx_REG ourselves.  Code elsewhere in GCC assumes
1333 	     that there is a single rtx representing the stack pointer,
1334 	     namely stack_pointer_rtx, and uses == to recognize it.  */
1335 	  addr = stack_pointer_rtx;
1336 
1337 	  if (byte_count != 0)
1338 	    addr = gen_rtx_PLUS (GET_MODE (addr), addr, GEN_INT (byte_count));
1339 
1340 	  dwarf_set[n_dwarfs++] =
1341 	    gen_rtx_SET (VOIDmode,
1342 			 gen_rtx_MEM (mode, addr),
1343 			 gen_rtx_REG (mode, pushm_info[i].reg1));
1344 	  F (dwarf_set[n_dwarfs - 1]);
1345 
1346 	}
1347       byte_count += bytes;
1348     }
1349 
1350   if (cfun->machine->is_interrupt)
1351     {
1352       cfun->machine->intr_pushm = reg_mask & 0xfe;
1353       reg_mask = 0;
1354       byte_count = 0;
1355     }
1356 
1357   if (cfun->machine->is_interrupt)
1358     for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1359       if (need_to_save (i))
1360 	{
1361 	  byte_count += 2;
1362 	  cfun->machine->intr_pushmem[i - MEM0_REGNO] = 1;
1363 	}
1364 
1365   if (ppt == PP_pushm && byte_count)
1366     {
1367       rtx note = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (n_dwarfs + 1));
1368       rtx pushm;
1369 
1370       if (reg_mask)
1371 	{
1372 	  XVECEXP (note, 0, 0)
1373 	    = gen_rtx_SET (VOIDmode,
1374 			   stack_pointer_rtx,
1375 			   gen_rtx_PLUS (GET_MODE (stack_pointer_rtx),
1376 					 stack_pointer_rtx,
1377 					 GEN_INT (-byte_count)));
1378 	  F (XVECEXP (note, 0, 0));
1379 
1380 	  for (i = 0; i < n_dwarfs; i++)
1381 	    XVECEXP (note, 0, i + 1) = dwarf_set[i];
1382 
1383 	  pushm = F (emit_insn (gen_pushm (GEN_INT (reg_mask))));
1384 
1385 	  REG_NOTES (pushm) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, note,
1386 						 REG_NOTES (pushm));
1387 	}
1388 
1389       if (cfun->machine->is_interrupt)
1390 	for (i = MEM0_REGNO; i <= MEM7_REGNO; i++)
1391 	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1392 	    {
1393 	      if (TARGET_A16)
1394 		pushm = emit_insn (gen_pushhi_16 (gen_rtx_REG (HImode, i)));
1395 	      else
1396 		pushm = emit_insn (gen_pushhi_24 (gen_rtx_REG (HImode, i)));
1397 	      F (pushm);
1398 	    }
1399     }
1400   if (ppt == PP_popm && byte_count)
1401     {
1402       if (cfun->machine->is_interrupt)
1403 	for (i = MEM7_REGNO; i >= MEM0_REGNO; i--)
1404 	  if (cfun->machine->intr_pushmem[i - MEM0_REGNO])
1405 	    {
1406 	      if (TARGET_A16)
1407 		emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, i)));
1408 	      else
1409 		emit_insn (gen_pophi_24 (gen_rtx_REG (HImode, i)));
1410 	    }
1411       if (reg_mask)
1412 	emit_insn (gen_popm (GEN_INT (reg_mask)));
1413     }
1414 
1415   return byte_count;
1416 }
1417 
1418 /* Implements INITIAL_ELIMINATION_OFFSET.  See the comment above that
1419    diagrams our call frame.  */
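/* For example, on TARGET_A16 eliminating $ap into $sp yields 5 bytes
   (3-byte return address plus 2-byte saved $fb) plus the pushm save
   area plus the local frame size; on TARGET_A24 the fixed part is 8
   bytes and the total is rounded up to an even size.  */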
1420 int
1421 m32c_initial_elimination_offset (int from, int to)
1422 {
1423   int ofs = 0;
1424 
1425   if (from == AP_REGNO)
1426     {
1427       if (TARGET_A16)
1428 	ofs += 5;
1429       else
1430 	ofs += 8;
1431     }
1432 
1433   if (to == SP_REGNO)
1434     {
1435       ofs += m32c_pushm_popm (PP_justcount);
1436       ofs += get_frame_size ();
1437     }
1438 
1439   /* Account for push rounding.  */
1440   if (TARGET_A24)
1441     ofs = (ofs + 1) & ~1;
1442 #if DEBUG0
1443   fprintf (stderr, "initial_elimination_offset from=%d to=%d, ofs=%d\n", from,
1444 	   to, ofs);
1445 #endif
1446   return ofs;
1447 }
1448 
1449 /* Passing Function Arguments on the Stack */
1450 
1451 /* Implements PUSH_ROUNDING.  The R8C and M16C have byte stacks, the
1452    M32C has word stacks.  */
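/* For example, a 3-byte push occupies 3 bytes of stack on R8C/M16C
   but 4 bytes on M32C.  */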
1453 int
1454 m32c_push_rounding (int n)
1455 {
1456   if (TARGET_R8C || TARGET_M16C)
1457     return n;
1458   return (n + 1) & ~1;
1459 }
1460 
1461 /* Passing Arguments in Registers */
1462 
1463 /* Implements FUNCTION_ARG.  Arguments are passed partly in registers,
1464    partly on stack.  If our function returns a struct, a pointer to a
1465    buffer for it is at the top of the stack (last thing pushed).  The
1466    first few real arguments may be in registers as follows:
1467 
1468    R8C/M16C:	arg1 in r1 if it's QI or HI (else it's pushed on stack)
1469 		arg2 in r2 if it's HI (else pushed on stack)
1470 		rest on stack
1471    M32C:        arg1 in r0 if it's QI or HI (else it's pushed on stack)
1472 		rest on stack
1473 
1474    Structs are not passed in registers, even if they fit.  Only
1475    integer and pointer types are passed in registers.
1476 
1477    Note that when arg1 doesn't fit in r1, arg2 may still be passed in
1478    r2 if it fits.  */
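/* For example, given f (char c, int i) (assuming the default 16-bit
   int), an R8C/M16C target passes c in r1 and i in r2, while an M32C
   target passes c in r0 and i on the stack.  */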
1479 rtx
1480 m32c_function_arg (CUMULATIVE_ARGS * ca,
1481 		   enum machine_mode mode, tree type, int named)
1482 {
1483   /* Can return a reg, parallel, or 0 for stack */
1484   rtx rv = NULL_RTX;
1485 #if DEBUG0
1486   fprintf (stderr, "func_arg %d (%s, %d)\n",
1487 	   ca->parm_num, mode_name[mode], named);
1488   debug_tree (type);
1489 #endif
1490 
1491   if (mode == VOIDmode)
1492     return GEN_INT (0);
1493 
1494   if (ca->force_mem || !named)
1495     {
1496 #if DEBUG0
1497       fprintf (stderr, "func arg: force %d named %d, mem\n", ca->force_mem,
1498 	       named);
1499 #endif
1500       return NULL_RTX;
1501     }
1502 
1503   if (type && !INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
1504     return NULL_RTX;
1505 
1506   if (type && AGGREGATE_TYPE_P (type))
1507     return NULL_RTX;
1508 
1509   switch (ca->parm_num)
1510     {
1511     case 1:
1512       if (GET_MODE_SIZE (mode) == 1 || GET_MODE_SIZE (mode) == 2)
1513 	rv = gen_rtx_REG (mode, TARGET_A16 ? R1_REGNO : R0_REGNO);
1514       break;
1515 
1516     case 2:
1517       if (TARGET_A16 && GET_MODE_SIZE (mode) == 2)
1518 	rv = gen_rtx_REG (mode, R2_REGNO);
1519       break;
1520     }
1521 
1522 #if DEBUG0
1523   debug_rtx (rv);
1524 #endif
1525   return rv;
1526 }
1527 
1528 #undef TARGET_PASS_BY_REFERENCE
1529 #define TARGET_PASS_BY_REFERENCE m32c_pass_by_reference
1530 static bool
1531 m32c_pass_by_reference (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED,
1532 			enum machine_mode mode ATTRIBUTE_UNUSED,
1533 			const_tree type ATTRIBUTE_UNUSED,
1534 			bool named ATTRIBUTE_UNUSED)
1535 {
1536   return 0;
1537 }
1538 
1539 /* Implements INIT_CUMULATIVE_ARGS.  */
1540 void
1541 m32c_init_cumulative_args (CUMULATIVE_ARGS * ca,
1542 			   tree fntype,
1543 			   rtx libname ATTRIBUTE_UNUSED,
1544 			   tree fndecl,
1545 			   int n_named_args ATTRIBUTE_UNUSED)
1546 {
1547   if (fntype && aggregate_value_p (TREE_TYPE (fntype), fndecl))
1548     ca->force_mem = 1;
1549   else
1550     ca->force_mem = 0;
1551   ca->parm_num = 1;
1552 }
1553 
1554 /* Implements FUNCTION_ARG_ADVANCE.  force_mem is set for functions
1555    returning structures, so we always reset that.  Otherwise, we only
1556    need to know the sequence number of the argument to know what to do
1557    with it.  */
1558 void
1559 m32c_function_arg_advance (CUMULATIVE_ARGS * ca,
1560 			   enum machine_mode mode ATTRIBUTE_UNUSED,
1561 			   tree type ATTRIBUTE_UNUSED,
1562 			   int named ATTRIBUTE_UNUSED)
1563 {
1564   if (ca->force_mem)
1565     ca->force_mem = 0;
1566   else
1567     ca->parm_num++;
1568 }
1569 
1570 /* Implements FUNCTION_ARG_REGNO_P.  */
1571 int
1572 m32c_function_arg_regno_p (int r)
1573 {
1574   if (TARGET_A24)
1575     return (r == R0_REGNO);
1576   return (r == R1_REGNO || r == R2_REGNO);
1577 }
1578 
1579 /* HImode and PSImode are the two "native" modes as far as GCC is
1580    concerned, but the chips also support a 32-bit mode which is used
1581    for some opcodes in R8C/M16C and for reset vectors and such.  */
1582 #undef TARGET_VALID_POINTER_MODE
1583 #define TARGET_VALID_POINTER_MODE m32c_valid_pointer_mode
1584 static bool
1585 m32c_valid_pointer_mode (enum machine_mode mode)
1586 {
1587   if (mode == HImode
1588       || mode == PSImode
1589       || mode == SImode
1590       )
1591     return 1;
1592   return 0;
1593 }
1594 
1595 /* How Scalar Function Values Are Returned */
1596 
1597 /* Implements TARGET_LIBCALL_VALUE.  Most values are returned in $r0, or some
1598    combination of registers starting there (r2r0 for longs, r3r1r2r0
1599    for long long, r3r2r1r0 for doubles), except that that ABI
1600    currently doesn't work because it ends up using all available
1601    general registers and gcc often can't compile it.  So, instead, we
1602    return anything bigger than 16 bits in "mem0" (effectively, a
1603    memory location).  */
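/* For example, an SImode libcall result therefore comes back in mem0
   rather than in r2r0.  */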
1604 
1605 #undef TARGET_LIBCALL_VALUE
1606 #define TARGET_LIBCALL_VALUE m32c_libcall_value
1607 
1608 static rtx
1609 m32c_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
1610 {
1611   /* return reg or parallel */
1612 #if 0
1613   /* FIXME: GCC has difficulty returning large values in registers,
1614      because that ties up most of the general registers and gives the
1615      register allocator little to work with.  Until we can resolve
1616      this, large values are returned in memory.  */
1617   if (mode == DFmode)
1618     {
1619       rtx rv;
1620 
1621       rv = gen_rtx_PARALLEL (mode, rtvec_alloc (4));
1622       XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1623 					      gen_rtx_REG (HImode,
1624 							   R0_REGNO),
1625 					      GEN_INT (0));
1626       XVECEXP (rv, 0, 1) = gen_rtx_EXPR_LIST (VOIDmode,
1627 					      gen_rtx_REG (HImode,
1628 							   R1_REGNO),
1629 					      GEN_INT (2));
1630       XVECEXP (rv, 0, 2) = gen_rtx_EXPR_LIST (VOIDmode,
1631 					      gen_rtx_REG (HImode,
1632 							   R2_REGNO),
1633 					      GEN_INT (4));
1634       XVECEXP (rv, 0, 3) = gen_rtx_EXPR_LIST (VOIDmode,
1635 					      gen_rtx_REG (HImode,
1636 							   R3_REGNO),
1637 					      GEN_INT (6));
1638       return rv;
1639     }
1640 
1641   if (TARGET_A24 && GET_MODE_SIZE (mode) > 2)
1642     {
1643       rtx rv;
1644 
1645       rv = gen_rtx_PARALLEL (mode, rtvec_alloc (1));
1646       XVECEXP (rv, 0, 0) = gen_rtx_EXPR_LIST (VOIDmode,
1647 					      gen_rtx_REG (mode,
1648 							   R0_REGNO),
1649 					      GEN_INT (0));
1650       return rv;
1651     }
1652 #endif
1653 
1654   if (GET_MODE_SIZE (mode) > 2)
1655     return gen_rtx_REG (mode, MEM0_REGNO);
1656   return gen_rtx_REG (mode, R0_REGNO);
1657 }
1658 
1659 /* Implements TARGET_FUNCTION_VALUE.  Functions and libcalls have the same
1660    conventions.  */
1661 
1662 #undef TARGET_FUNCTION_VALUE
1663 #define TARGET_FUNCTION_VALUE m32c_function_value
1664 
1665 static rtx
1666 m32c_function_value (const_tree valtype,
1667 		     const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1668 		     bool outgoing ATTRIBUTE_UNUSED)
1669 {
1670   /* return reg or parallel */
1671   const enum machine_mode mode = TYPE_MODE (valtype);
1672   return m32c_libcall_value (mode, NULL_RTX);
1673 }
1674 
1675 /* Implements FUNCTION_VALUE_REGNO_P.  */
1676 
1677 bool
1678 m32c_function_value_regno_p (const unsigned int regno)
1679 {
1680   return (regno == R0_REGNO || regno == MEM0_REGNO);
1681 }
1682 
1683 /* How Large Values Are Returned */
1684 
1685 /* We return structures by pushing the address on the stack, even if
1686    we use registers for the first few "real" arguments.  */
1687 #undef TARGET_STRUCT_VALUE_RTX
1688 #define TARGET_STRUCT_VALUE_RTX m32c_struct_value_rtx
1689 static rtx
1690 m32c_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
1691 		       int incoming ATTRIBUTE_UNUSED)
1692 {
1693   return 0;
1694 }
1695 
1696 /* Function Entry and Exit */
1697 
1698 /* Implements EPILOGUE_USES.  Interrupts restore all registers.  */
1699 int
1700 m32c_epilogue_uses (int regno ATTRIBUTE_UNUSED)
1701 {
1702   if (cfun->machine->is_interrupt)
1703     return 1;
1704   return 0;
1705 }
1706 
1707 /* Implementing the Varargs Macros */
1708 
1709 #undef TARGET_STRICT_ARGUMENT_NAMING
1710 #define TARGET_STRICT_ARGUMENT_NAMING m32c_strict_argument_naming
1711 static bool
1712 m32c_strict_argument_naming (CUMULATIVE_ARGS * ca ATTRIBUTE_UNUSED)
1713 {
1714   return 1;
1715 }
1716 
1717 /* Trampolines for Nested Functions */
1718 
1719 /*
1720    m16c:
1721    1 0000 75C43412              mov.w   #0x1234,a0
1722    2 0004 FC000000              jmp.a   label
1723 
1724    m32c:
1725    1 0000 BC563412              mov.l:s #0x123456,a0
1726    2 0004 CC000000              jmp.a   label
1727 */
1728 
1729 /* Implements TRAMPOLINE_SIZE.  */
1730 int
1731 m32c_trampoline_size (void)
1732 {
1733   /* Allocate extra space so we can avoid the messy shifts when we
1734      initialize the trampoline; we just write past the end of the
1735      opcode.  */
1736   return TARGET_A16 ? 8 : 10;
1737 }
1738 
1739 /* Implements TRAMPOLINE_ALIGNMENT.  */
1740 int
1741 m32c_trampoline_alignment (void)
1742 {
1743   return 2;
1744 }
1745 
1746 /* Implements TARGET_TRAMPOLINE_INIT.  */
1747 
1748 #undef TARGET_TRAMPOLINE_INIT
1749 #define TARGET_TRAMPOLINE_INIT m32c_trampoline_init
1750 static void
1751 m32c_trampoline_init (rtx m_tramp, tree fndecl, rtx chainval)
1752 {
1753   rtx function = XEXP (DECL_RTL (fndecl), 0);
1754 
1755 #define A0(m,i) adjust_address (m_tramp, m, i)
1756   if (TARGET_A16)
1757     {
1758       /* Note: we subtract a "word" because the moves want signed
1759 	 constants, not unsigned constants.  */
1760       emit_move_insn (A0 (HImode, 0), GEN_INT (0xc475 - 0x10000));
1761       emit_move_insn (A0 (HImode, 2), chainval);
1762       emit_move_insn (A0 (QImode, 4), GEN_INT (0xfc - 0x100));
1763       /* We use 16-bit addresses here, but store the zero to turn it
1764 	 into a 24-bit offset.  */
1765       emit_move_insn (A0 (HImode, 5), function);
1766       emit_move_insn (A0 (QImode, 7), GEN_INT (0x00));
1767     }
1768   else
1769     {
1770       /* Note that the PSI moves actually write 4 bytes.  Make sure we
1771 	 write stuff out in the right order, and leave room for the
1772 	 extra byte at the end.  */
1773       emit_move_insn (A0 (QImode, 0), GEN_INT (0xbc - 0x100));
1774       emit_move_insn (A0 (PSImode, 1), chainval);
1775       emit_move_insn (A0 (QImode, 4), GEN_INT (0xcc - 0x100));
1776       emit_move_insn (A0 (PSImode, 5), function);
1777     }
1778 #undef A0
1779 }
1780 
1781 /* Implicit Calls to Library Routines */
1782 
1783 #undef TARGET_INIT_LIBFUNCS
1784 #define TARGET_INIT_LIBFUNCS m32c_init_libfuncs
1785 static void
1786 m32c_init_libfuncs (void)
1787 {
1788   /* We do this because the M32C has an HImode operand, but the
1789      M16C has an 8-bit operand.  Since gcc looks at the match data
1790      and not the expanded rtl, we have to reset the optab so that
1791      the right modes are found. */
1792   if (TARGET_A24)
1793     {
1794       optab_handler (cstore_optab, QImode)->insn_code = CODE_FOR_cstoreqi4_24;
1795       optab_handler (cstore_optab, HImode)->insn_code = CODE_FOR_cstorehi4_24;
1796       optab_handler (cstore_optab, PSImode)->insn_code = CODE_FOR_cstorepsi4_24;
1797     }
1798 }
1799 
1800 /* Addressing Modes */
1801 
1802 /* The r8c/m32c family supports a wide range of non-orthogonal
1803    addressing modes, including the ability to double-indirect on *some*
1804    of them.  Not all insns support all modes, either, but we rely on
1805    predicates and constraints to deal with that.  */
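/* For example, a (plus (reg $fb) (const_int N)) address is accepted
   only for displacements -128..127 (less the access size) on
   TARGET_A16, while a0/a1-based addresses allow a much wider range;
   see the "+ri" case in m32c_legitimate_address_p below.  */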
1806 #undef TARGET_LEGITIMATE_ADDRESS_P
1807 #define TARGET_LEGITIMATE_ADDRESS_P m32c_legitimate_address_p
1808 bool
1809 m32c_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1810 {
1811   int mode_adjust;
1812   if (CONSTANT_P (x))
1813     return 1;
1814 
1815   /* Wide references to memory will be split after reload, so we must
1816      ensure that all parts of such splits remain legitimate
1817      addresses.  */
1818   mode_adjust = GET_MODE_SIZE (mode) - 1;
1819 
1820   /* allowing PLUS yields mem:HI(plus:SI(mem:SI(plus:SI in m32c_split_move */
1821   if (GET_CODE (x) == PRE_DEC
1822       || GET_CODE (x) == POST_INC || GET_CODE (x) == PRE_MODIFY)
1823     {
1824       return (GET_CODE (XEXP (x, 0)) == REG
1825 	      && REGNO (XEXP (x, 0)) == SP_REGNO);
1826     }
1827 
1828 #if 0
1829   /* This is the double indirection detection, but it currently
1830      doesn't work as cleanly as this code implies, so until we've had
1831      a chance to debug it, leave it disabled.  */
1832   if (TARGET_A24 && GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) != PLUS)
1833     {
1834 #if DEBUG_DOUBLE
1835       fprintf (stderr, "double indirect\n");
1836 #endif
1837       x = XEXP (x, 0);
1838     }
1839 #endif
1840 
1841   encode_pattern (x);
1842   if (RTX_IS ("r"))
1843     {
1844       /* Most indexable registers can be used without displacements,
1845 	 although some of them will be emitted with an explicit zero
1846 	 to please the assembler.  */
1847       switch (REGNO (patternr[0]))
1848 	{
1849 	case A0_REGNO:
1850 	case A1_REGNO:
1851 	case SB_REGNO:
1852 	case FB_REGNO:
1853 	case SP_REGNO:
1854 	  return 1;
1855 
1856 	default:
1857 	  if (IS_PSEUDO (patternr[0], strict))
1858 	    return 1;
1859 	  return 0;
1860 	}
1861     }
1862   if (RTX_IS ("+ri"))
1863     {
1864       /* This is more interesting, because different base registers
1865 	 allow for different displacements - both range and signedness
1866 	 - and it differs from chip series to chip series too.  */
1867       int rn = REGNO (patternr[1]);
1868       HOST_WIDE_INT offs = INTVAL (patternr[2]);
1869       switch (rn)
1870 	{
1871 	case A0_REGNO:
1872 	case A1_REGNO:
1873 	case SB_REGNO:
1874 	  /* The syntax only allows positive offsets, but when the
1875 	     offsets span the entire memory range, we can simulate
1876 	     negative offsets by wrapping.  */
1877 	  if (TARGET_A16)
1878 	    return (offs >= -65536 && offs <= 65535 - mode_adjust);
1879 	  if (rn == SB_REGNO)
1880 	    return (offs >= 0 && offs <= 65535 - mode_adjust);
1881 	  /* A0 or A1 */
1882 	  return (offs >= -16777216 && offs <= 16777215);
1883 
1884 	case FB_REGNO:
1885 	  if (TARGET_A16)
1886 	    return (offs >= -128 && offs <= 127 - mode_adjust);
1887 	  return (offs >= -65536 && offs <= 65535 - mode_adjust);
1888 
1889 	case SP_REGNO:
1890 	  return (offs >= -128 && offs <= 127 - mode_adjust);
1891 
1892 	default:
1893 	  if (IS_PSEUDO (patternr[1], strict))
1894 	    return 1;
1895 	  return 0;
1896 	}
1897     }
1898   if (RTX_IS ("+rs") || RTX_IS ("+r+si"))
1899     {
1900       rtx reg = patternr[1];
1901 
1902       /* We don't know where the symbol is, so only allow base
1903 	 registers which support displacements spanning the whole
1904 	 address range.  */
1905       switch (REGNO (reg))
1906 	{
1907 	case A0_REGNO:
1908 	case A1_REGNO:
1909 	  /* $sb needs a secondary reload, but since it's involved in
1910 	     memory address reloads too, we don't deal with it very
1911 	     well.  */
1912 	  /*    case SB_REGNO: */
1913 	  return 1;
1914 	default:
1915 	  if (IS_PSEUDO (reg, strict))
1916 	    return 1;
1917 	  return 0;
1918 	}
1919     }
1920   return 0;
1921 }
1922 
1923 /* Implements REG_OK_FOR_BASE_P.  */
1924 int
1925 m32c_reg_ok_for_base_p (rtx x, int strict)
1926 {
1927   if (GET_CODE (x) != REG)
1928     return 0;
1929   switch (REGNO (x))
1930     {
1931     case A0_REGNO:
1932     case A1_REGNO:
1933     case SB_REGNO:
1934     case FB_REGNO:
1935     case SP_REGNO:
1936       return 1;
1937     default:
1938       if (IS_PSEUDO (x, strict))
1939 	return 1;
1940       return 0;
1941     }
1942 }
1943 
1944 /* We have three choices for choosing fb->aN offsets.  If we choose -128,
1945    we need one MOVA -128[fb],aN opcode and 16-bit aN displacements,
1946    like this:
1947        EB 4B FF    mova    -128[$fb],$a0
1948        D8 0C FF FF mov.w:Q #0,-1[$a0]
1949 
1950    Alternately, we subtract the frame size, and hopefully use 8-bit aN
1951    displacements:
1952        7B F4       stc $fb,$a0
1953        77 54 00 01 sub #256,$a0
1954        D8 08 01    mov.w:Q #0,1[$a0]
1955 
1956    If we don't offset (i.e. offset by zero), we end up with:
1957        7B F4       stc $fb,$a0
1958        D8 0C 00 FF mov.w:Q #0,-256[$a0]
1959 
1960    We have to subtract *something* so that we have a PLUS rtx to mark
1961    that we've done this reload.  The -128 offset will never result in
1962    an 8-bit aN offset, and the payoff for the second case is five
1963    loads *if* those loads are within 256 bytes of the other end of the
1964    frame, so the third case seems best.  Note that we subtract the
1965    zero, but detect that in the addhi3 pattern.  */
1966 
1967 #define BIG_FB_ADJ 0
1968 
1969 /* Implements LEGITIMIZE_ADDRESS.  The only address we really have to
1970    worry about is frame base offsets, as $fb has a limited
1971    displacement range.  We deal with this by attempting to reload $fb
1972    itself into an address register; that seems to result in the best
1973    code.  */
1974 #undef TARGET_LEGITIMIZE_ADDRESS
1975 #define TARGET_LEGITIMIZE_ADDRESS m32c_legitimize_address
1976 static rtx
1977 m32c_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1978 			 enum machine_mode mode)
1979 {
1980 #if DEBUG0
1981   fprintf (stderr, "m32c_legitimize_address for mode %s\n", mode_name[mode]);
1982   debug_rtx (x);
1983   fprintf (stderr, "\n");
1984 #endif
1985 
1986   if (GET_CODE (x) == PLUS
1987       && GET_CODE (XEXP (x, 0)) == REG
1988       && REGNO (XEXP (x, 0)) == FB_REGNO
1989       && GET_CODE (XEXP (x, 1)) == CONST_INT
1990       && (INTVAL (XEXP (x, 1)) < -128
1991 	  || INTVAL (XEXP (x, 1)) > (128 - GET_MODE_SIZE (mode))))
1992     {
1993       /* reload FB to A_REGS */
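      /* A hypothetical example: (plus (reg fb) (const_int 300))
	 becomes (plus (reg temp) (const_int 300)), with
	 (set (reg temp) (reg fb)) emitted first; TEMP is expected to
	 be reloaded into an address register later.  */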
1994       rtx temp = gen_reg_rtx (Pmode);
1995       x = copy_rtx (x);
1996       emit_insn (gen_rtx_SET (VOIDmode, temp, XEXP (x, 0)));
1997       XEXP (x, 0) = temp;
1998     }
1999 
2000   return x;
2001 }
2002 
2003 /* Implements LEGITIMIZE_RELOAD_ADDRESS.  See comment above.  */
2004 int
2005 m32c_legitimize_reload_address (rtx * x,
2006 				enum machine_mode mode,
2007 				int opnum,
2008 				int type, int ind_levels ATTRIBUTE_UNUSED)
2009 {
2010 #if DEBUG0
2011   fprintf (stderr, "\nm32c_legitimize_reload_address for mode %s\n",
2012 	   mode_name[mode]);
2013   debug_rtx (*x);
2014 #endif
2015 
2016   /* At one point, this function tried to get $fb copied to an address
2017      register, which in theory would maximize sharing, but gcc was
2018      *also* still trying to reload the whole address, and we'd run out
2019      of address registers.  So we let gcc do the naive (but safe)
2020      reload instead, when the above function doesn't handle it for
2021      us.
2022 
2023      The code below is a second attempt at the above.  */
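  /* For example, (plus (reg fb) (const_int 300)) is rewritten as
     (plus (plus (reg fb) (const_int 0)) (const_int 300)) (BIG_FB_ADJ
     is zero), and the inner PLUS is pushed as a reload into an
     address register.  */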
2024 
2025   if (GET_CODE (*x) == PLUS
2026       && GET_CODE (XEXP (*x, 0)) == REG
2027       && REGNO (XEXP (*x, 0)) == FB_REGNO
2028       && GET_CODE (XEXP (*x, 1)) == CONST_INT
2029       && (INTVAL (XEXP (*x, 1)) < -128
2030 	  || INTVAL (XEXP (*x, 1)) > (128 - GET_MODE_SIZE (mode))))
2031     {
2032       rtx sum;
2033       int offset = INTVAL (XEXP (*x, 1));
2034       int adjustment = -BIG_FB_ADJ;
2035 
2036       sum = gen_rtx_PLUS (Pmode, XEXP (*x, 0),
2037 			  GEN_INT (adjustment));
2038       *x = gen_rtx_PLUS (Pmode, sum, GEN_INT (offset - adjustment));
2039       if (type == RELOAD_OTHER)
2040 	type = RELOAD_FOR_OTHER_ADDRESS;
2041       push_reload (sum, NULL_RTX, &XEXP (*x, 0), NULL,
2042 		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2043 		   type);
2044       return 1;
2045     }
2046 
2047   if (GET_CODE (*x) == PLUS
2048       && GET_CODE (XEXP (*x, 0)) == PLUS
2049       && GET_CODE (XEXP (XEXP (*x, 0), 0)) == REG
2050       && REGNO (XEXP (XEXP (*x, 0), 0)) == FB_REGNO
2051       && GET_CODE (XEXP (XEXP (*x, 0), 1)) == CONST_INT
2052       && GET_CODE (XEXP (*x, 1)) == CONST_INT
2053       )
2054     {
2055       if (type == RELOAD_OTHER)
2056 	type = RELOAD_FOR_OTHER_ADDRESS;
2057       push_reload (XEXP (*x, 0), NULL_RTX, &XEXP (*x, 0), NULL,
2058 		   A_REGS, Pmode, VOIDmode, 0, 0, opnum,
2059 		   type);
2060       return 1;
2061     }
2062 
2063   return 0;
2064 }
2065 
2066 /* Implements LEGITIMATE_CONSTANT_P.  We split large constants anyway,
2067    so we can allow anything.  */
2068 int
2069 m32c_legitimate_constant_p (rtx x ATTRIBUTE_UNUSED)
2070 {
2071   return 1;
2072 }
2073 
2074 
2075 /* Condition Code Status */
2076 
2077 #undef TARGET_FIXED_CONDITION_CODE_REGS
2078 #define TARGET_FIXED_CONDITION_CODE_REGS m32c_fixed_condition_code_regs
2079 static bool
2080 m32c_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
2081 {
2082   *p1 = FLG_REGNO;
2083   *p2 = INVALID_REGNUM;
2084   return true;
2085 }
2086 
2087 /* Describing Relative Costs of Operations */
2088 
2089 /* Implements REGISTER_MOVE_COST.  We make impossible moves
2090    prohibitively expensive, like trying to put QIs in r2/r3 (there are
2091    no opcodes to do that).  We also discourage use of mem* registers
2092    since they're really memory.  */
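/* For example, a QImode move whose source and destination classes both
   fall entirely within R23_REGS is costed at COSTS_N_INSNS (1000) to
   keep reload away from it.  */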
2093 int
2094 m32c_register_move_cost (enum machine_mode mode, int from, int to)
2095 {
2096   int cost = COSTS_N_INSNS (3);
2097   int cc = class_contents[from][0] | class_contents[to][0];
2098   /* FIXME: pick real values, but not 2 for now.  */
2099   if (mode == QImode && (cc & class_contents[R23_REGS][0]))
2100     {
2101       if (!(cc & ~class_contents[R23_REGS][0]))
2102 	cost = COSTS_N_INSNS (1000);
2103       else
2104 	cost = COSTS_N_INSNS (80);
2105     }
2106 
2107   if (!class_can_hold_mode (from, mode) || !class_can_hold_mode (to, mode))
2108     cost = COSTS_N_INSNS (1000);
2109 
2110   if (classes_intersect (from, CR_REGS))
2111     cost += COSTS_N_INSNS (5);
2112 
2113   if (classes_intersect (to, CR_REGS))
2114     cost += COSTS_N_INSNS (5);
2115 
2116   if (from == MEM_REGS || to == MEM_REGS)
2117     cost += COSTS_N_INSNS (50);
2118   else if (classes_intersect (from, MEM_REGS)
2119 	   || classes_intersect (to, MEM_REGS))
2120     cost += COSTS_N_INSNS (10);
2121 
2122 #if DEBUG0
2123   fprintf (stderr, "register_move_cost %s from %s to %s = %d\n",
2124 	   mode_name[mode], class_names[from], class_names[to], cost);
2125 #endif
2126   return cost;
2127 }
2128 
2129 /*  Implements MEMORY_MOVE_COST.  */
2130 int
2131 m32c_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2132 		       int reg_class ATTRIBUTE_UNUSED,
2133 		       int in ATTRIBUTE_UNUSED)
2134 {
2135   /* FIXME: pick real values.  */
2136   return COSTS_N_INSNS (10);
2137 }
2138 
2139 /* Here we try to describe when we use multiple opcodes for one RTX so
2140    that gcc knows when to use them.  */
2141 #undef TARGET_RTX_COSTS
2142 #define TARGET_RTX_COSTS m32c_rtx_costs
2143 static bool
2144 m32c_rtx_costs (rtx x, int code, int outer_code, int *total,
2145 		bool speed ATTRIBUTE_UNUSED)
2146 {
2147   switch (code)
2148     {
2149     case REG:
2150       if (REGNO (x) >= MEM0_REGNO && REGNO (x) <= MEM7_REGNO)
2151 	*total += COSTS_N_INSNS (500);
2152       else
2153 	*total += COSTS_N_INSNS (1);
2154       return true;
2155 
2156     case ASHIFT:
2157     case LSHIFTRT:
2158     case ASHIFTRT:
2159       if (GET_CODE (XEXP (x, 1)) != CONST_INT)
2160 	{
2161 	  /* mov.b r1l, r1h */
2162 	  *total +=  COSTS_N_INSNS (1);
2163 	  return true;
2164 	}
2165       if (INTVAL (XEXP (x, 1)) > 8
2166 	  || INTVAL (XEXP (x, 1)) < -8)
2167 	{
2168 	  /* mov.b #N, r1l */
2169 	  /* mov.b r1l, r1h */
2170 	  *total +=  COSTS_N_INSNS (2);
2171 	  return true;
2172 	}
2173       return true;
2174 
2175     case LE:
2176     case LEU:
2177     case LT:
2178     case LTU:
2179     case GT:
2180     case GTU:
2181     case GE:
2182     case GEU:
2183     case NE:
2184     case EQ:
2185       if (outer_code == SET)
2186 	{
2187 	  *total += COSTS_N_INSNS (2);
2188 	  return true;
2189 	}
2190       break;
2191 
2192     case ZERO_EXTRACT:
2193       {
2194 	rtx dest = XEXP (x, 0);
2195 	rtx addr = XEXP (dest, 0);
2196 	switch (GET_CODE (addr))
2197 	  {
2198 	  case CONST_INT:
2199 	    *total += COSTS_N_INSNS (1);
2200 	    break;
2201 	  case SYMBOL_REF:
2202 	    *total += COSTS_N_INSNS (3);
2203 	    break;
2204 	  default:
2205 	    *total += COSTS_N_INSNS (2);
2206 	    break;
2207 	  }
2208 	return true;
2209       }
2210       break;
2211 
2212     default:
2213       /* Reasonable default.  */
2214       if (TARGET_A16 && GET_MODE(x) == SImode)
2215 	*total += COSTS_N_INSNS (2);
2216       break;
2217     }
2218   return false;
2219 }
2220 
2221 #undef TARGET_ADDRESS_COST
2222 #define TARGET_ADDRESS_COST m32c_address_cost
2223 static int
2224 m32c_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED)
2225 {
2226   int i;
2227   /*  fprintf(stderr, "\naddress_cost\n");
2228       debug_rtx(addr);*/
2229   switch (GET_CODE (addr))
2230     {
2231     case CONST_INT:
2232       i = INTVAL (addr);
2233       if (i == 0)
2234 	return COSTS_N_INSNS(1);
2235       if (0 < i && i <= 255)
2236 	return COSTS_N_INSNS(2);
2237       if (0 < i && i <= 65535)
2238 	return COSTS_N_INSNS(3);
2239       return COSTS_N_INSNS(4);
2240     case SYMBOL_REF:
2241       return COSTS_N_INSNS(4);
2242     case REG:
2243       return COSTS_N_INSNS(1);
2244     case PLUS:
2245       if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
2246 	{
2247 	  i = INTVAL (XEXP (addr, 1));
2248 	  if (i == 0)
2249 	    return COSTS_N_INSNS(1);
2250 	  if (0 < i && i <= 255)
2251 	    return COSTS_N_INSNS(2);
2252 	  if (0 < i && i <= 65535)
2253 	    return COSTS_N_INSNS(3);
2254 	}
2255       return COSTS_N_INSNS(4);
2256     default:
2257       return 0;
2258     }
2259 }
2260 
2261 /* Defining the Output Assembler Language */
2262 
2263 /* The Overall Framework of an Assembler File */
2264 
2265 #undef TARGET_HAVE_NAMED_SECTIONS
2266 #define TARGET_HAVE_NAMED_SECTIONS true
2267 
2268 /* Output of Data */
2269 
2270 /* We may have 24 bit sizes, which is the native address size.
2271    Currently unused, but provided for completeness.  */
2272 #undef TARGET_ASM_INTEGER
2273 #define TARGET_ASM_INTEGER m32c_asm_integer
2274 static bool
2275 m32c_asm_integer (rtx x, unsigned int size, int aligned_p)
2276 {
2277   switch (size)
2278     {
2279     case 3:
2280       fprintf (asm_out_file, "\t.3byte\t");
2281       output_addr_const (asm_out_file, x);
2282       fputc ('\n', asm_out_file);
2283       return true;
2284     case 4:
2285       if (GET_CODE (x) == SYMBOL_REF)
2286 	{
2287 	  fprintf (asm_out_file, "\t.long\t");
2288 	  output_addr_const (asm_out_file, x);
2289 	  fputc ('\n', asm_out_file);
2290 	  return true;
2291 	}
2292       break;
2293     }
2294   return default_assemble_integer (x, size, aligned_p);
2295 }
2296 
2297 /* Output of Assembler Instructions */
2298 
2299 /* We use a lookup table because the addressing modes are non-orthogonal.  */
2300 
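/* Each entry maps an operand code plus an encoded operand pattern to
   an output format string: a digit in FORMAT means "print
   patternr[digit]", 'z' inserts an explicit zero displacement when
   the base register requires one, and '\' quotes the next format
   character; see m32c_print_operand below for the details.  */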
2301 static struct
2302 {
2303   char code;
2304   char const *pattern;
2305   char const *format;
2306 }
2307 const conversions[] = {
2308   { 0, "r", "0" },
2309 
2310   { 0, "mr", "z[1]" },
2311   { 0, "m+ri", "3[2]" },
2312   { 0, "m+rs", "3[2]" },
2313   { 0, "m+r+si", "4+5[2]" },
2314   { 0, "ms", "1" },
2315   { 0, "mi", "1" },
2316   { 0, "m+si", "2+3" },
2317 
2318   { 0, "mmr", "[z[2]]" },
2319   { 0, "mm+ri", "[4[3]]" },
2320   { 0, "mm+rs", "[4[3]]" },
2321   { 0, "mm+r+si", "[5+6[3]]" },
2322   { 0, "mms", "[[2]]" },
2323   { 0, "mmi", "[[2]]" },
2324   { 0, "mm+si", "[4[3]]" },
2325 
2326   { 0, "i", "#0" },
2327   { 0, "s", "#0" },
2328   { 0, "+si", "#1+2" },
2329   { 0, "l", "#0" },
2330 
2331   { 'l', "l", "0" },
2332   { 'd', "i", "0" },
2333   { 'd', "s", "0" },
2334   { 'd', "+si", "1+2" },
2335   { 'D', "i", "0" },
2336   { 'D', "s", "0" },
2337   { 'D', "+si", "1+2" },
2338   { 'x', "i", "#0" },
2339   { 'X', "i", "#0" },
2340   { 'm', "i", "#0" },
2341   { 'b', "i", "#0" },
2342   { 'B', "i", "0" },
2343   { 'p', "i", "0" },
2344 
2345   { 0, 0, 0 }
2346 };
2347 
2348 /* This is in order according to the bitfield that pushm/popm use.  */
2349 static char const *pushm_regs[] = {
2350   "fb", "sb", "a1", "a0", "r3", "r2", "r1", "r0"
2351 };
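/* For example, a mask of 0x81 (bits 7 and 0) prints as "r0,fb".  */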
2352 
2353 /* Implements PRINT_OPERAND.  */
2354 void
2355 m32c_print_operand (FILE * file, rtx x, int code)
2356 {
2357   int i, j, b;
2358   const char *comma;
2359   HOST_WIDE_INT ival;
2360   int unsigned_const = 0;
2361   int force_sign;
2362 
2363   /* For multiplies, constants are converted to sign-extended format,
2364      but we need them unsigned; 'u' and 'U' tell us what size of
2365      unsigned value we need.  */
2366   if (code == 'u')
2367     {
2368       unsigned_const = 2;
2369       code = 0;
2370     }
2371   if (code == 'U')
2372     {
2373       unsigned_const = 1;
2374       code = 0;
2375     }
2376   /* This one is only for debugging; you can put it in a pattern to
2377      force this error.  */
2378   if (code == '!')
2379     {
2380       fprintf (stderr, "dj: unreviewed pattern:");
2381       if (current_output_insn)
2382 	debug_rtx (current_output_insn);
2383       gcc_unreachable ();
2384     }
2385   /* PSImode operations are either .w or .l depending on the target.  */
2386   if (code == '&')
2387     {
2388       if (TARGET_A16)
2389 	fprintf (file, "w");
2390       else
2391 	fprintf (file, "l");
2392       return;
2393     }
2394   /* Inverted conditionals.  */
2395   if (code == 'C')
2396     {
2397       switch (GET_CODE (x))
2398 	{
2399 	case LE:
2400 	  fputs ("gt", file);
2401 	  break;
2402 	case LEU:
2403 	  fputs ("gtu", file);
2404 	  break;
2405 	case LT:
2406 	  fputs ("ge", file);
2407 	  break;
2408 	case LTU:
2409 	  fputs ("geu", file);
2410 	  break;
2411 	case GT:
2412 	  fputs ("le", file);
2413 	  break;
2414 	case GTU:
2415 	  fputs ("leu", file);
2416 	  break;
2417 	case GE:
2418 	  fputs ("lt", file);
2419 	  break;
2420 	case GEU:
2421 	  fputs ("ltu", file);
2422 	  break;
2423 	case NE:
2424 	  fputs ("eq", file);
2425 	  break;
2426 	case EQ:
2427 	  fputs ("ne", file);
2428 	  break;
2429 	default:
2430 	  gcc_unreachable ();
2431 	}
2432       return;
2433     }
2434   /* Regular conditionals.  */
2435   if (code == 'c')
2436     {
2437       switch (GET_CODE (x))
2438 	{
2439 	case LE:
2440 	  fputs ("le", file);
2441 	  break;
2442 	case LEU:
2443 	  fputs ("leu", file);
2444 	  break;
2445 	case LT:
2446 	  fputs ("lt", file);
2447 	  break;
2448 	case LTU:
2449 	  fputs ("ltu", file);
2450 	  break;
2451 	case GT:
2452 	  fputs ("gt", file);
2453 	  break;
2454 	case GTU:
2455 	  fputs ("gtu", file);
2456 	  break;
2457 	case GE:
2458 	  fputs ("ge", file);
2459 	  break;
2460 	case GEU:
2461 	  fputs ("geu", file);
2462 	  break;
2463 	case NE:
2464 	  fputs ("ne", file);
2465 	  break;
2466 	case EQ:
2467 	  fputs ("eq", file);
2468 	  break;
2469 	default:
2470 	  gcc_unreachable ();
2471 	}
2472       return;
2473     }
2474   /* Used in negsi2 to do HImode ops on the two parts of an SImode
2475      operand.  */
2476   if (code == 'h' && GET_MODE (x) == SImode)
2477     {
2478       x = m32c_subreg (HImode, x, SImode, 0);
2479       code = 0;
2480     }
2481   if (code == 'H' && GET_MODE (x) == SImode)
2482     {
2483       x = m32c_subreg (HImode, x, SImode, 2);
2484       code = 0;
2485     }
2486   if (code == 'h' && GET_MODE (x) == HImode)
2487     {
2488       x = m32c_subreg (QImode, x, HImode, 0);
2489       code = 0;
2490     }
2491   if (code == 'H' && GET_MODE (x) == HImode)
2492     {
2493       /* We can't actually represent this as an rtx.  Do it here.  */
2494       if (GET_CODE (x) == REG)
2495 	{
2496 	  switch (REGNO (x))
2497 	    {
2498 	    case R0_REGNO:
2499 	      fputs ("r0h", file);
2500 	      return;
2501 	    case R1_REGNO:
2502 	      fputs ("r1h", file);
2503 	      return;
2504 	    default:
2505 	      gcc_unreachable();
2506 	    }
2507 	}
2508       /* This should be a MEM.  */
2509       x = m32c_subreg (QImode, x, HImode, 1);
2510       code = 0;
2511     }
2512   /* This is for BMcond, which always wants word register names.  */
2513   if (code == 'h' && GET_MODE (x) == QImode)
2514     {
2515       if (GET_CODE (x) == REG)
2516 	x = gen_rtx_REG (HImode, REGNO (x));
2517       code = 0;
2518     }
2519   /* 'x' and 'X' need to be ignored for non-immediates.  */
2520   if ((code == 'x' || code == 'X') && GET_CODE (x) != CONST_INT)
2521     code = 0;
2522 
2523   encode_pattern (x);
2524   force_sign = 0;
2525   for (i = 0; conversions[i].pattern; i++)
2526     if (conversions[i].code == code
2527 	&& streq (conversions[i].pattern, pattern))
2528       {
2529 	for (j = 0; conversions[i].format[j]; j++)
2530 	  /* backslash quotes the next character in the output pattern.  */
2531 	  if (conversions[i].format[j] == '\\')
2532 	    {
2533 	      fputc (conversions[i].format[j + 1], file);
2534 	      j++;
2535 	    }
2536 	  /* Digits in the output pattern indicate that the
2537 	     corresponding RTX is to be output at that point.  */
2538 	  else if (ISDIGIT (conversions[i].format[j]))
2539 	    {
2540 	      rtx r = patternr[conversions[i].format[j] - '0'];
2541 	      switch (GET_CODE (r))
2542 		{
2543 		case REG:
2544 		  fprintf (file, "%s",
2545 			   reg_name_with_mode (REGNO (r), GET_MODE (r)));
2546 		  break;
2547 		case CONST_INT:
2548 		  switch (code)
2549 		    {
2550 		    case 'b':
2551 		    case 'B':
2552 		      {
2553 			int v = INTVAL (r);
2554 			int i = (int) exact_log2 (v);
2555 			if (i == -1)
2556 			  i = (int) exact_log2 ((v ^ 0xffff) & 0xffff);
2557 			if (i == -1)
2558 			  i = (int) exact_log2 ((v ^ 0xff) & 0xff);
2559 			/* Bit position.  */
2560 			fprintf (file, "%d", i);
2561 		      }
2562 		      break;
2563 		    case 'x':
2564 		      /* Unsigned byte.  */
2565 		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2566 			       INTVAL (r) & 0xff);
2567 		      break;
2568 		    case 'X':
2569 		      /* Unsigned word.  */
2570 		      fprintf (file, HOST_WIDE_INT_PRINT_HEX,
2571 			       INTVAL (r) & 0xffff);
2572 		      break;
2573 		    case 'p':
2574 		      /* pushm and popm encode a register set into a single byte.  */
2575 		      comma = "";
2576 		      for (b = 7; b >= 0; b--)
2577 			if (INTVAL (r) & (1 << b))
2578 			  {
2579 			    fprintf (file, "%s%s", comma, pushm_regs[b]);
2580 			    comma = ",";
2581 			  }
2582 		      break;
2583 		    case 'm':
2584 		      /* "Minus".  Output -X  */
2585 		      ival = (-INTVAL (r) & 0xffff);
2586 		      if (ival & 0x8000)
2587 			ival = ival - 0x10000;
2588 		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2589 		      break;
2590 		    default:
2591 		      ival = INTVAL (r);
2592 		      if (conversions[i].format[j + 1] == '[' && ival < 0)
2593 			{
2594 			  /* We can simulate negative displacements by
2595 			     taking advantage of address space
2596 			     wrapping when the offset can span the
2597 			     entire address range.  */
2598 			  rtx base =
2599 			    patternr[conversions[i].format[j + 2] - '0'];
2600 			  if (GET_CODE (base) == REG)
2601 			    switch (REGNO (base))
2602 			      {
2603 			      case A0_REGNO:
2604 			      case A1_REGNO:
2605 				if (TARGET_A24)
2606 				  ival = 0x1000000 + ival;
2607 				else
2608 				  ival = 0x10000 + ival;
2609 				break;
2610 			      case SB_REGNO:
2611 				if (TARGET_A16)
2612 				  ival = 0x10000 + ival;
2613 				break;
2614 			      }
2615 			}
2616 		      else if (code == 'd' && ival < 0 && j == 0)
2617 			/* The "mova" opcode is used to do addition by
2618 			   computing displacements, but again, we need
2619 			   displacements to be unsigned *if* they're
2620 			   the only component of the displacement
2621 			   (i.e. no "symbol-4" type displacement).  */
2622 			ival = (TARGET_A24 ? 0x1000000 : 0x10000) + ival;
2623 
2624 		      if (conversions[i].format[j] == '0')
2625 			{
2626 			  /* More conversions to unsigned.  */
2627 			  if (unsigned_const == 2)
2628 			    ival &= 0xffff;
2629 			  if (unsigned_const == 1)
2630 			    ival &= 0xff;
2631 			}
2632 		      if (streq (conversions[i].pattern, "mi")
2633 			  || streq (conversions[i].pattern, "mmi"))
2634 			{
2635 			  /* Integers used as addresses are unsigned.  */
2636 			  ival &= (TARGET_A24 ? 0xffffff : 0xffff);
2637 			}
2638 		      if (force_sign && ival >= 0)
2639 			fputc ('+', file);
2640 		      fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
2641 		      break;
2642 		    }
2643 		  break;
2644 		case CONST_DOUBLE:
2645 		  /* We don't have const_double constants.  If it
2646 		     happens, make it obvious.  */
2647 		  fprintf (file, "[const_double 0x%lx]",
2648 			   (unsigned long) CONST_DOUBLE_HIGH (r));
2649 		  break;
2650 		case SYMBOL_REF:
2651 		  assemble_name (file, XSTR (r, 0));
2652 		  break;
2653 		case LABEL_REF:
2654 		  output_asm_label (r);
2655 		  break;
2656 		default:
2657 		  fprintf (stderr, "don't know how to print this operand:");
2658 		  debug_rtx (r);
2659 		  gcc_unreachable ();
2660 		}
2661 	    }
2662 	  else
2663 	    {
2664 	      if (conversions[i].format[j] == 'z')
2665 		{
2666 		  /* Some addressing modes *must* have a displacement,
2667 		     so insert a zero here if needed.  */
2668 		  int k;
2669 		  for (k = j + 1; conversions[i].format[k]; k++)
2670 		    if (ISDIGIT (conversions[i].format[k]))
2671 		      {
2672 			rtx reg = patternr[conversions[i].format[k] - '0'];
2673 			if (GET_CODE (reg) == REG
2674 			    && (REGNO (reg) == SB_REGNO
2675 				|| REGNO (reg) == FB_REGNO
2676 				|| REGNO (reg) == SP_REGNO))
2677 			  fputc ('0', file);
2678 		      }
2679 		  continue;
2680 		}
2681 	      /* Signed displacements off symbols need to have signs
2682 		 blended cleanly.  */
2683 	      if (conversions[i].format[j] == '+'
2684 		  && (!code || code == 'D' || code == 'd')
2685 		  && ISDIGIT (conversions[i].format[j + 1])
2686 		  && (GET_CODE (patternr[conversions[i].format[j + 1] - '0'])
2687 		      == CONST_INT))
2688 		{
2689 		  force_sign = 1;
2690 		  continue;
2691 		}
2692 	      fputc (conversions[i].format[j], file);
2693 	    }
2694 	break;
2695       }
2696   if (!conversions[i].pattern)
2697     {
2698       fprintf (stderr, "unconvertible operand %c `%s'", code ? code : '-',
2699 	       pattern);
2700       debug_rtx (x);
2701       fprintf (file, "[%c.%s]", code ? code : '-', pattern);
2702     }
2703 
2704   return;
2705 }
2706 
2707 /* Implements PRINT_OPERAND_PUNCT_VALID_P.  See m32c_print_operand
2708    above for descriptions of what these do.  */
2709 int
2710 m32c_print_operand_punct_valid_p (int c)
2711 {
2712   if (c == '&' || c == '!')
2713     return 1;
2714   return 0;
2715 }
2716 
2717 /* Implements PRINT_OPERAND_ADDRESS.  Nothing unusual here.  */
2718 void
2719 m32c_print_operand_address (FILE * stream, rtx address)
2720 {
2721   if (GET_CODE (address) == MEM)
2722     address = XEXP (address, 0);
2723   else
2724     /* cf: gcc.dg/asm-4.c.  */
2725     gcc_assert (GET_CODE (address) == REG);
2726 
2727   m32c_print_operand (stream, address, 0);
2728 }
2729 
2730 /* Implements ASM_OUTPUT_REG_PUSH.  Control registers are pushed
2731    differently than general registers.  */
2732 void
2733 m32c_output_reg_push (FILE * s, int regno)
2734 {
2735   if (regno == FLG_REGNO)
2736     fprintf (s, "\tpushc\tflg\n");
2737   else
2738     fprintf (s, "\tpush.%c\t%s\n",
2739 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2740 }
2741 
2742 /* Likewise for ASM_OUTPUT_REG_POP.  */
2743 void
2744 m32c_output_reg_pop (FILE * s, int regno)
2745 {
2746   if (regno == FLG_REGNO)
2747     fprintf (s, "\tpopc\tflg\n");
2748   else
2749     fprintf (s, "\tpop.%c\t%s\n",
2750 	     " bwll"[reg_push_size (regno)], reg_names[regno]);
2751 }
2752 
2753 /* Defining target-specific uses of `__attribute__' */
2754 
2755 /* Used to simplify the logic below.  Find the attributes wherever
2756    they may be.  */
2757 #define M32C_ATTRIBUTES(decl) \
2758   (TYPE_P (decl)) ? TYPE_ATTRIBUTES (decl) \
2759                 : DECL_ATTRIBUTES (decl) \
2760                   ? (DECL_ATTRIBUTES (decl)) \
2761 		  : TYPE_ATTRIBUTES (TREE_TYPE (decl))
2762 
2763 /* Returns TRUE if the given tree has the "interrupt" attribute.  */
2764 static int
2765 interrupt_p (tree node ATTRIBUTE_UNUSED)
2766 {
2767   tree list = M32C_ATTRIBUTES (node);
2768   while (list)
2769     {
2770       if (is_attribute_p ("interrupt", TREE_PURPOSE (list)))
2771 	return 1;
2772       list = TREE_CHAIN (list);
2773     }
2774   return fast_interrupt_p (node);
2775 }
2776 
2777 /* Returns TRUE if the given tree has the "bank_switch" attribute.  */
2778 static int
2779 bank_switch_p (tree node ATTRIBUTE_UNUSED)
2780 {
2781   tree list = M32C_ATTRIBUTES (node);
2782   while (list)
2783     {
2784       if (is_attribute_p ("bank_switch", TREE_PURPOSE (list)))
2785 	return 1;
2786       list = TREE_CHAIN (list);
2787     }
2788   return 0;
2789 }
2790 
2791 /* Returns TRUE if the given tree has the "fast_interrupt" attribute.  */
2792 static int
2793 fast_interrupt_p (tree node ATTRIBUTE_UNUSED)
2794 {
2795   tree list = M32C_ATTRIBUTES (node);
2796   while (list)
2797     {
2798       if (is_attribute_p ("fast_interrupt", TREE_PURPOSE (list)))
2799 	return 1;
2800       list = TREE_CHAIN (list);
2801     }
2802   return 0;
2803 }
2804 
2805 static tree
2806 interrupt_handler (tree * node ATTRIBUTE_UNUSED,
2807 		   tree name ATTRIBUTE_UNUSED,
2808 		   tree args ATTRIBUTE_UNUSED,
2809 		   int flags ATTRIBUTE_UNUSED,
2810 		   bool * no_add_attrs ATTRIBUTE_UNUSED)
2811 {
2812   return NULL_TREE;
2813 }
2814 
2815 /* Returns TRUE if the given tree has the "function_vector" attribute.  */
2816 int
2817 m32c_special_page_vector_p (tree func)
2818 {
2819   tree list;
2820 
2821   if (TREE_CODE (func) != FUNCTION_DECL)
2822     return 0;
2823 
2824   list = M32C_ATTRIBUTES (func);
2825   while (list)
2826     {
2827       if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2828         return 1;
2829       list = TREE_CHAIN (list);
2830     }
2831   return 0;
2832 }
2833 
2834 static tree
2835 function_vector_handler (tree * node ATTRIBUTE_UNUSED,
2836                          tree name ATTRIBUTE_UNUSED,
2837                          tree args ATTRIBUTE_UNUSED,
2838                          int flags ATTRIBUTE_UNUSED,
2839                          bool * no_add_attrs ATTRIBUTE_UNUSED)
2840 {
2841   if (TARGET_R8C)
2842     {
2843       /* The attribute is not supported for the R8C target.  */
2844       warning (OPT_Wattributes,
2845                 "%qE attribute is not supported for R8C target",
2846                 name);
2847       *no_add_attrs = true;
2848     }
2849   else if (TREE_CODE (*node) != FUNCTION_DECL)
2850     {
2851       /* The attribute must be applied to functions only.  */
2852       warning (OPT_Wattributes,
2853                 "%qE attribute applies only to functions",
2854                 name);
2855       *no_add_attrs = true;
2856     }
2857   else if (TREE_CODE (TREE_VALUE (args)) != INTEGER_CST)
2858     {
2859       /* The argument must be a constant integer.  */
2860       warning (OPT_Wattributes,
2861                 "%qE attribute argument not an integer constant",
2862                 name);
2863       *no_add_attrs = true;
2864     }
2865   else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18
2866            || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255)
2867     {
2868       /* The argument value must be between 18 and 255.  */
2869       warning (OPT_Wattributes,
2870                 "%qE attribute argument should be between 18 and 255",
2871                 name);
2872       *no_add_attrs = true;
2873     }
2874   return NULL_TREE;
2875 }
2876 
2877 /* If the function is assigned the attribute 'function_vector', this
2878    returns the function vector number; otherwise it returns zero.  */
2879 int
2880 current_function_special_page_vector (rtx x)
2881 {
2882   int num;
2883 
2884   if ((GET_CODE(x) == SYMBOL_REF)
2885       && (SYMBOL_REF_FLAGS (x) & SYMBOL_FLAG_FUNCVEC_FUNCTION))
2886     {
2887       tree list;
2888       tree t = SYMBOL_REF_DECL (x);
2889 
2890       if (TREE_CODE (t) != FUNCTION_DECL)
2891         return 0;
2892 
2893       list = M32C_ATTRIBUTES (t);
2894       while (list)
2895         {
2896           if (is_attribute_p ("function_vector", TREE_PURPOSE (list)))
2897             {
2898               num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list)));
2899               return num;
2900             }
2901 
2902           list = TREE_CHAIN (list);
2903         }
2904 
2905       return 0;
2906     }
2907   else
2908     return 0;
2909 }
2910 
2911 #undef TARGET_ATTRIBUTE_TABLE
2912 #define TARGET_ATTRIBUTE_TABLE m32c_attribute_table
2913 static const struct attribute_spec m32c_attribute_table[] = {
2914   {"interrupt", 0, 0, false, false, false, interrupt_handler},
2915   {"bank_switch", 0, 0, false, false, false, interrupt_handler},
2916   {"fast_interrupt", 0, 0, false, false, false, interrupt_handler},
2917   {"function_vector", 1, 1, true,  false, false, function_vector_handler},
2918   {0, 0, 0, 0, 0, 0, 0}
2919 };
2920 
2921 #undef TARGET_COMP_TYPE_ATTRIBUTES
2922 #define TARGET_COMP_TYPE_ATTRIBUTES m32c_comp_type_attributes
2923 static int
2924 m32c_comp_type_attributes (const_tree type1 ATTRIBUTE_UNUSED,
2925 			   const_tree type2 ATTRIBUTE_UNUSED)
2926 {
2927   /* 0=incompatible 1=compatible 2=warning */
2928   return 1;
2929 }
2930 
2931 #undef TARGET_INSERT_ATTRIBUTES
2932 #define TARGET_INSERT_ATTRIBUTES m32c_insert_attributes
2933 static void
2934 m32c_insert_attributes (tree node ATTRIBUTE_UNUSED,
2935 			tree * attr_ptr ATTRIBUTE_UNUSED)
2936 {
2937   /* Nothing to do here.  */
2938 }
2939 
2940 /* Predicates */
2941 
2942 /* This is a list of legal subregs of hard regs.  */
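/* BYTE_MASK has one bit per byte offset: bit N set means a
   SUBREG_BYTE of N is allowed.  LEGAL_WHEN is 1 for "always legal",
   16 for "legal only when TARGET_A16", and 24 for "legal only when
   TARGET_A24".  */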
2943 static const struct {
2944   unsigned char outer_mode_size;
2945   unsigned char inner_mode_size;
2946   unsigned char byte_mask;
2947   unsigned char legal_when;
2948   unsigned int regno;
2949 } legal_subregs[] = {
2950   {1, 2, 0x03, 1, R0_REGNO}, /* r0h r0l */
2951   {1, 2, 0x03, 1, R1_REGNO}, /* r1h r1l */
2952   {1, 2, 0x01, 1, A0_REGNO},
2953   {1, 2, 0x01, 1, A1_REGNO},
2954 
2955   {1, 4, 0x01, 1, A0_REGNO},
2956   {1, 4, 0x01, 1, A1_REGNO},
2957 
2958   {2, 4, 0x05, 1, R0_REGNO}, /* r2 r0 */
2959   {2, 4, 0x05, 1, R1_REGNO}, /* r3 r1 */
2960   {2, 4, 0x05, 16, A0_REGNO}, /* a1 a0 */
2961   {2, 4, 0x01, 24, A0_REGNO}, /* a1 a0 */
2962   {2, 4, 0x01, 24, A1_REGNO}, /* a1 a0 */
2963 
2964   {4, 8, 0x55, 1, R0_REGNO}, /* r3 r1 r2 r0 */
2965 };
2966 
2967 /* Returns TRUE if OP is a subreg of a hard reg which we don't
2968    support.  */
2969 bool
2970 m32c_illegal_subreg_p (rtx op)
2971 {
2972   int offset;
2973   unsigned int i;
2974   int src_mode, dest_mode;
2975 
2976   if (GET_CODE (op) != SUBREG)
2977     return false;
2978 
2979   dest_mode = GET_MODE (op);
2980   offset = SUBREG_BYTE (op);
2981   op = SUBREG_REG (op);
2982   src_mode = GET_MODE (op);
2983 
2984   if (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (src_mode))
2985     return false;
2986   if (GET_CODE (op) != REG)
2987     return false;
2988   if (REGNO (op) >= MEM0_REGNO)
2989     return false;
2990 
2991   offset = (1 << offset);
2992 
2993   for (i = 0; i < ARRAY_SIZE (legal_subregs); i ++)
2994     if (legal_subregs[i].outer_mode_size == GET_MODE_SIZE (dest_mode)
2995 	&& legal_subregs[i].regno == REGNO (op)
2996 	&& legal_subregs[i].inner_mode_size == GET_MODE_SIZE (src_mode)
2997 	&& legal_subregs[i].byte_mask & offset)
2998       {
2999 	switch (legal_subregs[i].legal_when)
3000 	  {
3001 	  case 1:
3002 	    return false;
3003 	  case 16:
3004 	    if (TARGET_A16)
3005 	      return false;
3006 	    break;
3007 	  case 24:
3008 	    if (TARGET_A24)
3009 	      return false;
3010 	    break;
3011 	  }
3012       }
3013   return true;
3014 }
3015 
3016 /* Returns TRUE if we support a move between the first two operands.
3017    At the moment, we just want to discourage mem to mem moves until
3018    after reload, because reload has a hard time with our limited
3019    number of address registers, and we can get into a situation where
3020    we need three of them when we only have two.  */
3021 bool
3022 m32c_mov_ok (rtx * operands, enum machine_mode mode ATTRIBUTE_UNUSED)
3023 {
3024   rtx op0 = operands[0];
3025   rtx op1 = operands[1];
3026 
3027   if (TARGET_A24)
3028     return true;
3029 
3030 #define DEBUG_MOV_OK 0
3031 #if DEBUG_MOV_OK
3032   fprintf (stderr, "m32c_mov_ok %s\n", mode_name[mode]);
3033   debug_rtx (op0);
3034   debug_rtx (op1);
3035 #endif
3036 
3037   if (GET_CODE (op0) == SUBREG)
3038     op0 = XEXP (op0, 0);
3039   if (GET_CODE (op1) == SUBREG)
3040     op1 = XEXP (op1, 0);
3041 
3042   if (GET_CODE (op0) == MEM
3043       && GET_CODE (op1) == MEM
3044       && ! reload_completed)
3045     {
3046 #if DEBUG_MOV_OK
3047       fprintf (stderr, " - no, mem to mem\n");
3048 #endif
3049       return false;
3050     }
3051 
3052 #if DEBUG_MOV_OK
3053   fprintf (stderr, " - ok\n");
3054 #endif
3055   return true;
3056 }
3057 
3058 /* Returns TRUE if two consecutive HImode mov instructions, generated
3059    for moving immediate double-word data to a double-word variable
3060    location, can be combined into a single SImode mov instruction.  */
3061 bool
3062 m32c_immd_dbl_mov (rtx * operands,
3063 		   enum machine_mode mode ATTRIBUTE_UNUSED)
3064 {
3065   int flag = 0, okflag = 0, offset1 = 0, offset2 = 0, offsetsign = 0;
3066   const char *str1;
3067   const char *str2;
3068 
3069   if (GET_CODE (XEXP (operands[0], 0)) == SYMBOL_REF
3070       && MEM_SCALAR_P (operands[0])
3071       && !MEM_IN_STRUCT_P (operands[0])
3072       && GET_CODE (XEXP (operands[2], 0)) == CONST
3073       && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3074       && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3075       && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 1)) == CONST_INT
3076       && MEM_SCALAR_P (operands[2])
3077       && !MEM_IN_STRUCT_P (operands[2]))
3078     flag = 1;
3079 
3080   else if (GET_CODE (XEXP (operands[0], 0)) == CONST
3081            && GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == PLUS
3082            && GET_CODE (XEXP (XEXP (XEXP (operands[0], 0), 0), 0)) == SYMBOL_REF
3083            && MEM_SCALAR_P (operands[0])
3084            && !MEM_IN_STRUCT_P (operands[0])
3085            && !(INTVAL (XEXP (XEXP (XEXP (operands[0], 0), 0), 1)) %4)
3086            && GET_CODE (XEXP (operands[2], 0)) == CONST
3087            && GET_CODE (XEXP (XEXP (operands[2], 0), 0)) == PLUS
3088            && GET_CODE (XEXP (XEXP (XEXP (operands[2], 0), 0), 0)) == SYMBOL_REF
3089            && MEM_SCALAR_P (operands[2])
3090            && !MEM_IN_STRUCT_P (operands[2]))
3091     flag = 2;
3092 
3093   else if (GET_CODE (XEXP (operands[0], 0)) == PLUS
3094            &&  GET_CODE (XEXP (XEXP (operands[0], 0), 0)) == REG
3095            &&  REGNO (XEXP (XEXP (operands[0], 0), 0)) == FB_REGNO
3096            &&  GET_CODE (XEXP (XEXP (operands[0], 0), 1)) == CONST_INT
3097            &&  MEM_SCALAR_P (operands[0])
3098            &&  !MEM_IN_STRUCT_P (operands[0])
3099            &&  !(INTVAL (XEXP (XEXP (operands[0], 0), 1)) %4)
3100            &&  REGNO (XEXP (XEXP (operands[2], 0), 0)) == FB_REGNO
3101            &&  GET_CODE (XEXP (XEXP (operands[2], 0), 1)) == CONST_INT
3102            &&  MEM_SCALAR_P (operands[2])
3103            &&  !MEM_IN_STRUCT_P (operands[2]))
3104     flag = 3;
3105 
3106   else
3107     return false;
3108 
3109   switch (flag)
3110     {
3111     case 1:
3112       str1 = XSTR (XEXP (operands[0], 0), 0);
3113       str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3114       if (strcmp (str1, str2) == 0)
3115 	okflag = 1;
3116       else
3117 	okflag = 0;
3118       break;
3119     case 2:
3120       str1 = XSTR (XEXP (XEXP (XEXP (operands[0], 0), 0), 0), 0);
3121       str2 = XSTR (XEXP (XEXP (XEXP (operands[2], 0), 0), 0), 0);
3122       if (strcmp(str1,str2) == 0)
3123 	okflag = 1;
3124       else
3125 	okflag = 0;
3126       break;
3127     case 3:
3128       offset1 = INTVAL (XEXP (XEXP (operands[0], 0), 1));
3129       offset2 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
3130       offsetsign = offset1 >> ((sizeof (offset1) * 8) -1);
3131       if (((offset2-offset1) == 2) && offsetsign != 0)
3132 	okflag = 1;
3133       else
3134 	okflag = 0;
3135       break;
3136     default:
3137       okflag = 0;
3138     }
3139 
3140   if (okflag == 1)
3141     {
3142       HOST_WIDE_INT val;
3143       operands[4] = gen_rtx_MEM (SImode, XEXP (operands[0], 0));
3144 
3145       val = (INTVAL (operands[3]) << 16) + (INTVAL (operands[1]) & 0xFFFF);
3146       operands[5] = gen_rtx_CONST_INT (VOIDmode, val);
3147 
3148       return true;
3149     }
3150 
3151   return false;
3152 }
3153 
3154 /* Expanders */
3155 
3156 /* Subregs are non-orthogonal for us, because our registers are all
3157    different sizes.  */
3158 static rtx
3159 m32c_subreg (enum machine_mode outer,
3160 	     rtx x, enum machine_mode inner, int byte)
3161 {
3162   int r, nr = -1;
3163 
3164   /* When converting MEMs to different modes of the same size, we just
3165      rewrite them.  */
3166   if (GET_CODE (x) == SUBREG
3167       && SUBREG_BYTE (x) == 0
3168       && GET_CODE (SUBREG_REG (x)) == MEM
3169       && (GET_MODE_SIZE (GET_MODE (x))
3170 	  == GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
3171     {
3172       rtx oldx = x;
3173       x = gen_rtx_MEM (GET_MODE (x), XEXP (SUBREG_REG (x), 0));
3174       MEM_COPY_ATTRIBUTES (x, SUBREG_REG (oldx));
3175     }
3176 
3177   /* Push/pop get done as smaller push/pops.  */
3178   if (GET_CODE (x) == MEM
3179       && (GET_CODE (XEXP (x, 0)) == PRE_DEC
3180 	  || GET_CODE (XEXP (x, 0)) == POST_INC))
3181     return gen_rtx_MEM (outer, XEXP (x, 0));
3182   if (GET_CODE (x) == SUBREG
3183       && GET_CODE (XEXP (x, 0)) == MEM
3184       && (GET_CODE (XEXP (XEXP (x, 0), 0)) == PRE_DEC
3185 	  || GET_CODE (XEXP (XEXP (x, 0), 0)) == POST_INC))
3186     return gen_rtx_MEM (outer, XEXP (XEXP (x, 0), 0));
3187 
3188   if (GET_CODE (x) != REG)
3189     return simplify_gen_subreg (outer, x, inner, byte);
3190 
3191   r = REGNO (x);
3192   if (r >= FIRST_PSEUDO_REGISTER || r == AP_REGNO)
3193     return simplify_gen_subreg (outer, x, inner, byte);
3194 
3195   if (IS_MEM_REGNO (r))
3196     return simplify_gen_subreg (outer, x, inner, byte);
3197 
3198   /* This is where the complexities of our register layout are
3199      described.  */
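  /* For example, the HImode subreg at byte 2 of an SImode value held
     in r2r0 is r2, and the one at byte 2 of a value held in a1a0 is
     a1.  */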
3200   if (byte == 0)
3201     nr = r;
3202   else if (outer == HImode)
3203     {
3204       if (r == R0_REGNO && byte == 2)
3205 	nr = R2_REGNO;
3206       else if (r == R0_REGNO && byte == 4)
3207 	nr = R1_REGNO;
3208       else if (r == R0_REGNO && byte == 6)
3209 	nr = R3_REGNO;
3210       else if (r == R1_REGNO && byte == 2)
3211 	nr = R3_REGNO;
3212       else if (r == A0_REGNO && byte == 2)
3213 	nr = A1_REGNO;
3214     }
3215   else if (outer == SImode)
3216     {
3217       if (r == R0_REGNO && byte == 0)
3218 	nr = R0_REGNO;
3219       else if (r == R0_REGNO && byte == 4)
3220 	nr = R1_REGNO;
3221     }
3222   if (nr == -1)
3223     {
3224       fprintf (stderr, "m32c_subreg %s %s %d\n",
3225 	       mode_name[outer], mode_name[inner], byte);
3226       debug_rtx (x);
3227       gcc_unreachable ();
3228     }
3229   return gen_rtx_REG (outer, nr);
3230 }
3231 
3232 /* Used to emit move instructions.  We split some moves,
3233    and avoid mem-mem moves.  */
3234 int
3235 m32c_prepare_move (rtx * operands, enum machine_mode mode)
3236 {
3237   if (TARGET_A16 && mode == PSImode)
3238     return m32c_split_move (operands, mode, 1);
3239   if ((GET_CODE (operands[0]) == MEM)
3240       && (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY))
3241     {
3242       rtx pmv = XEXP (operands[0], 0);
3243       rtx dest_reg = XEXP (pmv, 0);
3244       rtx dest_mod = XEXP (pmv, 1);
3245 
3246       emit_insn (gen_rtx_SET (Pmode, dest_reg, dest_mod));
3247       operands[0] = gen_rtx_MEM (mode, dest_reg);
3248     }
3249   if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3250     operands[1] = copy_to_mode_reg (mode, operands[1]);
3251   return 0;
3252 }
3253 
3254 #define DEBUG_SPLIT 0
3255 
3256 /* Returns TRUE if the given PSImode move should be split.  We split
3257    all r8c/m16c moves, since those chips do not support them directly,
3258    and we split POP.L, since we can only *push* SImode.  */
3259 int
3260 m32c_split_psi_p (rtx * operands)
3261 {
3262 #if DEBUG_SPLIT
3263   fprintf (stderr, "\nm32c_split_psi_p\n");
3264   debug_rtx (operands[0]);
3265   debug_rtx (operands[1]);
3266 #endif
3267   if (TARGET_A16)
3268     {
3269 #if DEBUG_SPLIT
3270       fprintf (stderr, "yes, A16\n");
3271 #endif
3272       return 1;
3273     }
3274   if (GET_CODE (operands[1]) == MEM
3275       && GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3276     {
3277 #if DEBUG_SPLIT
3278       fprintf (stderr, "yes, pop.l\n");
3279 #endif
3280       return 1;
3281     }
3282 #if DEBUG_SPLIT
3283   fprintf (stderr, "no, default\n");
3284 #endif
3285   return 0;
3286 }
3287 
3288 /* Split the given move.  SPLIT_ALL is 0 if splitting is optional
3289    (define_expand), 1 if it is not optional (define_insn_and_split),
3290    and 3 for define_split (alternate api). */
3291 int
3292 m32c_split_move (rtx * operands, enum machine_mode mode, int split_all)
3293 {
3294   rtx s[4], d[4];
3295   int parts, si, di, rev = 0;
3296   int rv = 0, opi = 2;
3297   enum machine_mode submode = HImode;
3298   rtx *ops, local_ops[10];
3299 
3300   /* define_split modifies the existing operands, but the other two
3301      emit new insns.  OPS is where we store the operand pairs, which
3302      we emit later.  */
3303   if (split_all == 3)
3304     ops = operands;
3305   else
3306     ops = local_ops;
3307 
3308   /* Use SImode parts for DImode; otherwise SUBMODE stays HImode.  */
3309   if (mode == DImode)
3310     submode = SImode;
3311 
3312   /* Before splitting mem-mem moves, force one operand into a
3313      register.  */
3314   if (can_create_pseudo_p () && MEM_P (operands[0]) && MEM_P (operands[1]))
3315     {
3316 #if DEBUG0
3317       fprintf (stderr, "force_reg...\n");
3318       debug_rtx (operands[1]);
3319 #endif
3320       operands[1] = force_reg (mode, operands[1]);
3321 #if DEBUG0
3322       debug_rtx (operands[1]);
3323 #endif
3324     }
3325 
3326   parts = 2;
3327 
3328 #if DEBUG_SPLIT
3329   fprintf (stderr, "\nsplit_move %d all=%d\n", !can_create_pseudo_p (),
3330 	   split_all);
3331   debug_rtx (operands[0]);
3332   debug_rtx (operands[1]);
3333 #endif
3334 
3335   /* Note that split_all is not used to select the api after this
3336      point, so it's safe to set it to 3 even with define_insn.  */
3337   /* None of the chips can move SI operands to sp-relative addresses,
3338      so we always split those.  */
3339   if (m32c_extra_constraint_p (operands[0], 'S', "Ss"))
3340     split_all = 3;
3341 
3342   /* We don't need to split these.  */
3343   if (TARGET_A24
3344       && split_all != 3
3345       && (mode == SImode || mode == PSImode)
3346       && !(GET_CODE (operands[1]) == MEM
3347 	   && GET_CODE (XEXP (operands[1], 0)) == POST_INC))
3348     return 0;
3349 
3350   /* First, enumerate the subregs we'll be dealing with.  */
3351   for (si = 0; si < parts; si++)
3352     {
3353       d[si] =
3354 	m32c_subreg (submode, operands[0], mode,
3355 		     si * GET_MODE_SIZE (submode));
3356       s[si] =
3357 	m32c_subreg (submode, operands[1], mode,
3358 		     si * GET_MODE_SIZE (submode));
3359     }
3360 
3361   /* Split pushes by emitting a sequence of smaller pushes.  */
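  /* An SImode push therefore becomes two HImode pushes, emitted high
     part first.  */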
3362   if (GET_CODE (d[0]) == MEM && GET_CODE (XEXP (d[0], 0)) == PRE_DEC)
3363     {
3364       for (si = parts - 1; si >= 0; si--)
3365 	{
3366 	  ops[opi++] = gen_rtx_MEM (submode,
3367 				    gen_rtx_PRE_DEC (Pmode,
3368 						     gen_rtx_REG (Pmode,
3369 								  SP_REGNO)));
3370 	  ops[opi++] = s[si];
3371 	}
3372 
3373       rv = 1;
3374     }
3375   /* Likewise for pops.  */
3376   else if (GET_CODE (s[0]) == MEM && GET_CODE (XEXP (s[0], 0)) == POST_INC)
3377     {
3378       for (di = 0; di < parts; di++)
3379 	{
3380 	  ops[opi++] = d[di];
3381 	  ops[opi++] = gen_rtx_MEM (submode,
3382 				    gen_rtx_POST_INC (Pmode,
3383 						      gen_rtx_REG (Pmode,
3384 								   SP_REGNO)));
3385 	}
3386       rv = 1;
3387     }
3388   else if (split_all)
3389     {
3390       /* if d[di] == s[si] for any di < si, we'll early clobber. */
3391       for (di = 0; di < parts - 1; di++)
3392 	for (si = di + 1; si < parts; si++)
3393 	  if (reg_mentioned_p (d[di], s[si]))
3394 	    rev = 1;
3395 
3396       if (rev)
3397 	for (si = 0; si < parts; si++)
3398 	  {
3399 	    ops[opi++] = d[si];
3400 	    ops[opi++] = s[si];
3401 	  }
3402       else
3403 	for (si = parts - 1; si >= 0; si--)
3404 	  {
3405 	    ops[opi++] = d[si];
3406 	    ops[opi++] = s[si];
3407 	  }
3408       rv = 1;
3409     }
3410   /* Now emit any moves we may have accumulated.  */
3411   if (rv && split_all != 3)
3412     {
3413       int i;
3414       for (i = 2; i < opi; i += 2)
3415 	emit_move_insn (ops[i], ops[i + 1]);
3416     }
3417   return rv;
3418 }
3419 
3420 /* The m32c has a number of opcodes that act like memcpy, strcmp, and
3421    the like.  For the R8C they expect one of the addresses to be in
3422    R1L:An so we need to arrange for that.  Otherwise, it's just a
3423    matter of picking out the operands we want and emitting the right
3424    pattern for them.  All these expanders, which correspond to
3425    patterns in blkmov.md, must return nonzero if they expand the insn,
3426    or zero if they should FAIL.  */
3427 
3428 /* This is a memset() opcode.  All operands are implied, so we need to
3429    arrange for them to be in the right registers.  The opcode wants
3430    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3431    the count (HI), and $2 the value (QI).  */
3432 int
3433 m32c_expand_setmemhi(rtx *operands)
3434 {
3435   rtx desta, count, val;
3436   rtx desto, counto;
3437 
3438   desta = XEXP (operands[0], 0);
3439   count = operands[1];
3440   val = operands[2];
3441 
3442   desto = gen_reg_rtx (Pmode);
3443   counto = gen_reg_rtx (HImode);
3444 
3445   if (GET_CODE (desta) != REG
3446       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3447     desta = copy_to_mode_reg (Pmode, desta);
3448 
3449   /* This looks like an arbitrary restriction, but this is by far the
3450      most common case.  For counts 8..14 this actually results in
3451      smaller code with no speed penalty because the half-sized
3452      constant can be loaded with a shorter opcode.  */
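  /* For example, a memset of 10 bytes with the value 1 becomes a word
     fill of 0x0101 with a count of 5.  */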
3453   if (GET_CODE (count) == CONST_INT
3454       && GET_CODE (val) == CONST_INT
3455       && ! (INTVAL (count) & 1)
3456       && (INTVAL (count) > 1)
3457       && (INTVAL (val) <= 7 && INTVAL (val) >= -8))
3458     {
3459       unsigned v = INTVAL (val) & 0xff;
3460       v = v | (v << 8);
3461       count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3462       val = copy_to_mode_reg (HImode, GEN_INT (v));
3463       if (TARGET_A16)
3464 	emit_insn (gen_setmemhi_whi_op (desto, counto, val, desta, count));
3465       else
3466 	emit_insn (gen_setmemhi_wpsi_op (desto, counto, val, desta, count));
3467       return 1;
3468     }
3469 
3470   /* This is the generalized memset() case.  */
3471   if (GET_CODE (val) != REG
3472       || REGNO (val) < FIRST_PSEUDO_REGISTER)
3473     val = copy_to_mode_reg (QImode, val);
3474 
3475   if (GET_CODE (count) != REG
3476       || REGNO (count) < FIRST_PSEUDO_REGISTER)
3477     count = copy_to_mode_reg (HImode, count);
3478 
3479   if (TARGET_A16)
3480     emit_insn (gen_setmemhi_bhi_op (desto, counto, val, desta, count));
3481   else
3482     emit_insn (gen_setmemhi_bpsi_op (desto, counto, val, desta, count));
3483 
3484   return 1;
3485 }
3486 
3487 /* This is a memcpy() opcode.  All operands are implied, so we need to
3488    arrange for them to be in the right registers.  The opcode wants
3489    addresses, not [mem] syntax.  $0 is the destination (MEM:BLK), $1
3490    is the source (MEM:BLK), and $2 the count (HI).  */
3491 int
3492 m32c_expand_movmemhi(rtx *operands)
3493 {
3494   rtx desta, srca, count;
3495   rtx desto, srco, counto;
3496 
3497   desta = XEXP (operands[0], 0);
3498   srca = XEXP (operands[1], 0);
3499   count = operands[2];
3500 
3501   desto = gen_reg_rtx (Pmode);
3502   srco = gen_reg_rtx (Pmode);
3503   counto = gen_reg_rtx (HImode);
3504 
3505   if (GET_CODE (desta) != REG
3506       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3507     desta = copy_to_mode_reg (Pmode, desta);
3508 
3509   if (GET_CODE (srca) != REG
3510       || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3511     srca = copy_to_mode_reg (Pmode, srca);
3512 
3513   /* Similar to setmem, but we don't need to check the value.  */
3514   if (GET_CODE (count) == CONST_INT
3515       && ! (INTVAL (count) & 1)
3516       && (INTVAL (count) > 1))
3517     {
3518       count = copy_to_mode_reg (HImode, GEN_INT (INTVAL (count) / 2));
3519       if (TARGET_A16)
3520 	emit_insn (gen_movmemhi_whi_op (desto, srco, counto, desta, srca, count));
3521       else
3522 	emit_insn (gen_movmemhi_wpsi_op (desto, srco, counto, desta, srca, count));
3523       return 1;
3524     }
3525 
3526   /* This is the generalized memcpy() case.  */
3527   if (GET_CODE (count) != REG
3528       || REGNO (count) < FIRST_PSEUDO_REGISTER)
3529     count = copy_to_mode_reg (HImode, count);
3530 
3531   if (TARGET_A16)
3532     emit_insn (gen_movmemhi_bhi_op (desto, srco, counto, desta, srca, count));
3533   else
3534     emit_insn (gen_movmemhi_bpsi_op (desto, srco, counto, desta, srca, count));
3535 
3536   return 1;
3537 }
3538 
3539 /* This is a stpcpy() opcode.  $0 is the destination (MEM:BLK) after
3540    the copy, which should point to the NUL at the end of the string,
3541    $1 is the destination (MEM:BLK), and $2 is the source (MEM:BLK).
3542    Since our opcode leaves the destination pointing *after* the NUL,
3543    we must emit an adjustment.  */
3544 int
3545 m32c_expand_movstr(rtx *operands)
3546 {
3547   rtx desta, srca;
3548   rtx desto, srco;
3549 
3550   desta = XEXP (operands[1], 0);
3551   srca = XEXP (operands[2], 0);
3552 
3553   desto = gen_reg_rtx (Pmode);
3554   srco = gen_reg_rtx (Pmode);
3555 
3556   if (GET_CODE (desta) != REG
3557       || REGNO (desta) < FIRST_PSEUDO_REGISTER)
3558     desta = copy_to_mode_reg (Pmode, desta);
3559 
3560   if (GET_CODE (srca) != REG
3561       || REGNO (srca) < FIRST_PSEUDO_REGISTER)
3562     srca = copy_to_mode_reg (Pmode, srca);
3563 
3564   emit_insn (gen_movstr_op (desto, srco, desta, srca));
3565   /* desto ends up being a1, which allows this type of add through MOVA.  */
3566   emit_insn (gen_addpsi3 (operands[0], desto, GEN_INT (-1)));
3567 
3568   return 1;
3569 }
3570 
3571 /* This is a strcmp() opcode.  $0 is the destination (HI) which holds
3572    <=>0 depending on the comparison, $1 is one string (MEM:BLK), and
3573    $2 is the other (MEM:BLK).  We must do the comparison, and then
3574    convert the flags to a signed integer result.  */
3575 int
3576 m32c_expand_cmpstr(rtx *operands)
3577 {
3578   rtx src1a, src2a;
3579 
3580   src1a = XEXP (operands[1], 0);
3581   src2a = XEXP (operands[2], 0);
3582 
3583   if (GET_CODE (src1a) != REG
3584       || REGNO (src1a) < FIRST_PSEUDO_REGISTER)
3585     src1a = copy_to_mode_reg (Pmode, src1a);
3586 
3587   if (GET_CODE (src2a) != REG
3588       || REGNO (src2a) < FIRST_PSEUDO_REGISTER)
3589     src2a = copy_to_mode_reg (Pmode, src2a);
3590 
3591   emit_insn (gen_cmpstrhi_op (src1a, src2a, src1a, src2a));
3592   emit_insn (gen_cond_to_int (operands[0]));
3593 
3594   return 1;
3595 }
3596 
3597 
3598 typedef rtx (*shift_gen_func)(rtx, rtx, rtx);
3599 
3600 static shift_gen_func
3601 shift_gen_func_for (int mode, int code)
3602 {
3603 #define GFF(m,c,f) if (mode == m && code == c) return f
3604   GFF(QImode,  ASHIFT,   gen_ashlqi3_i);
3605   GFF(QImode,  ASHIFTRT, gen_ashrqi3_i);
3606   GFF(QImode,  LSHIFTRT, gen_lshrqi3_i);
3607   GFF(HImode,  ASHIFT,   gen_ashlhi3_i);
3608   GFF(HImode,  ASHIFTRT, gen_ashrhi3_i);
3609   GFF(HImode,  LSHIFTRT, gen_lshrhi3_i);
3610   GFF(PSImode, ASHIFT,   gen_ashlpsi3_i);
3611   GFF(PSImode, ASHIFTRT, gen_ashrpsi3_i);
3612   GFF(PSImode, LSHIFTRT, gen_lshrpsi3_i);
3613   GFF(SImode,  ASHIFT,   TARGET_A16 ? gen_ashlsi3_16 : gen_ashlsi3_24);
3614   GFF(SImode,  ASHIFTRT, TARGET_A16 ? gen_ashrsi3_16 : gen_ashrsi3_24);
3615   GFF(SImode,  LSHIFTRT, TARGET_A16 ? gen_lshrsi3_16 : gen_lshrsi3_24);
3616 #undef GFF
3617   gcc_unreachable ();
3618 }
3619 
3620 /* The m32c only has one shift, but it takes a signed count.  GCC
3621    doesn't want this, so we fake it by negating any shift count when
3622    we're pretending to shift the other way.  Also, the shift count is
3623    limited to -8..8.  It's slightly better to use two shifts for 9..15
3624    than to load the count into r1h, so we do that too.  */
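/* For example, a constant QImode shift left by 11 is emitted as a
   shift by 8 followed by a shift by 3.  */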
3625 int
3626 m32c_prepare_shift (rtx * operands, int scale, int shift_code)
3627 {
3628   enum machine_mode mode = GET_MODE (operands[0]);
3629   shift_gen_func func = shift_gen_func_for (mode, shift_code);
3630   rtx temp;
3631 
3632   if (GET_CODE (operands[2]) == CONST_INT)
3633     {
3634       int maxc = TARGET_A24 && (mode == PSImode || mode == SImode) ? 32 : 8;
3635       int count = INTVAL (operands[2]) * scale;
3636 
3637       while (count > maxc)
3638 	{
3639 	  temp = gen_reg_rtx (mode);
3640 	  emit_insn (func (temp, operands[1], GEN_INT (maxc)));
3641 	  operands[1] = temp;
3642 	  count -= maxc;
3643 	}
3644       while (count < -maxc)
3645 	{
3646 	  temp = gen_reg_rtx (mode);
3647 	  emit_insn (func (temp, operands[1], GEN_INT (-maxc)));
3648 	  operands[1] = temp;
3649 	  count += maxc;
3650 	}
3651       emit_insn (func (operands[0], operands[1], GEN_INT (count)));
3652       return 1;
3653     }
3654 
3655   temp = gen_reg_rtx (QImode);
3656   if (scale < 0)
3657     /* The pattern has a NEG that corresponds to this. */
3658     emit_move_insn (temp, gen_rtx_NEG (QImode, operands[2]));
3659   else if (TARGET_A16 && mode == SImode)
3660     /* We copy the count because the code below may modify it, and we
3661        don't want to clobber the original value.  */
3662     emit_move_insn (temp, operands[2]);
3663   else
3664     /* We'll only use it for the shift, no point emitting a move.  */
3665     temp = operands[2];
3666 
3667   if (TARGET_A16 && GET_MODE_SIZE (mode) == 4)
3668     {
3669       /* The m16c has a limit of -16..16 for SI shifts, even when the
3670 	 shift count is in a register.  Since there are so many targets
3671 	 of these shifts, it's better to expand the RTL here than to
3672 	 call a helper function.
3673 
3674 	 The resulting code looks something like this:
3675 
3676 		cmp.b	r1h,-16
3677 		jge.b	1f
3678 		shl.l	-16,dest
3679 		add.b	r1h,16
3680 	1f:	cmp.b	r1h,16
3681 		jle.b	1f
3682 		shl.l	16,dest
3683 		sub.b	r1h,16
3684 	1f:	shl.l	r1h,dest
3685 
3686 	 We take advantage of the fact that "negative" shifts are
3687 	 undefined to skip one of the comparisons.  */
3688 
3689       rtx count;
3690       rtx label, lref, insn, tempvar;
3691 
3692       emit_move_insn (operands[0], operands[1]);
3693 
3694       count = temp;
3695       label = gen_label_rtx ();
3696       lref = gen_rtx_LABEL_REF (VOIDmode, label);
3697       LABEL_NUSES (label) ++;
3698 
3699       tempvar = gen_reg_rtx (mode);
3700 
3701       if (shift_code == ASHIFT)
3702 	{
3703 	  /* This is a left shift.  We only need to check positive counts.  */
3704 	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_LE (VOIDmode, 0, 0),
3705 					  count, GEN_INT (16), label));
3706 	  emit_insn (func (tempvar, operands[0], GEN_INT (8)));
3707 	  emit_insn (func (operands[0], tempvar, GEN_INT (8)));
3708 	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (-16)));
3709 	  emit_label_after (label, insn);
3710 	}
3711       else
3712 	{
3713 	  /* This is a right shift.  We only need to check negative counts.  */
3714 	  emit_jump_insn (gen_cbranchqi4 (gen_rtx_GE (VOIDmode, 0, 0),
3715 					  count, GEN_INT (-16), label));
3716 	  emit_insn (func (tempvar, operands[0], GEN_INT (-8)));
3717 	  emit_insn (func (operands[0], tempvar, GEN_INT (-8)));
3718 	  insn = emit_insn (gen_addqi3 (count, count, GEN_INT (16)));
3719 	  emit_label_after (label, insn);
3720 	}
3721       operands[1] = operands[0];
3722       emit_insn (func (operands[0], operands[0], count));
3723       return 1;
3724     }
3725 
3726   operands[2] = temp;
3727   return 0;
3728 }
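
/* Illustrative only: with TARGET_A16 and an HImode operand, maxc is 8,
   so a constant shift such as

       x <<= 12;

   is split by the loops above into a shift by 8 followed by a shift
   by 4, which avoids loading the count into r1h.  */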
3729 
3730 /* The m32c has a limited range of operations that work on PSImode
3731    values; we have to expand to SI, do the math, and truncate back to
3732    PSI.  Yes, this is expensive, but hopefully gcc will learn to avoid
3733    those cases.  */
3734 void
3735 m32c_expand_neg_mulpsi3 (rtx * operands)
3736 {
3737   /* operands: a = b * i */
3738   rtx temp1; /* b as SI */
3739   rtx scale /* i as SI */;
3740   rtx temp2; /* a*b as SI */
3741 
3742   temp1 = gen_reg_rtx (SImode);
3743   temp2 = gen_reg_rtx (SImode);
3744   if (GET_CODE (operands[2]) != CONST_INT)
3745     {
3746       scale = gen_reg_rtx (SImode);
3747       emit_insn (gen_zero_extendpsisi2 (scale, operands[2]));
3748     }
3749   else
3750     scale = copy_to_mode_reg (SImode, operands[2]);
3751 
3752   emit_insn (gen_zero_extendpsisi2 (temp1, operands[1]));
3753   temp2 = expand_simple_binop (SImode, MULT, temp1, scale, temp2, 1, OPTAB_LIB);
3754   emit_insn (gen_truncsipsi2 (operands[0], temp2));
3755 }
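
/* Illustrative only: for "a = b * 5" in PSImode the expansion above
   loads 5 into an SImode register, zero-extends b to SImode with
   zero_extendpsisi2, multiplies in SImode (possibly via a libcall, per
   OPTAB_LIB), and truncates the product back to PSImode with
   truncsipsi2.  */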
3756 
3757 /* Pattern Output Functions */
3758 
3759 int
3760 m32c_expand_movcc (rtx *operands)
3761 {
3762   rtx rel = operands[1];
3763   rtx cmp;
3764 
3765   if (GET_CODE (rel) != EQ && GET_CODE (rel) != NE)
3766     return 1;
3767   if (GET_CODE (operands[2]) != CONST_INT
3768       || GET_CODE (operands[3]) != CONST_INT)
3769     return 1;
3770   if (GET_CODE (rel) == NE)
3771     {
3772       rtx tmp = operands[2];
3773       operands[2] = operands[3];
3774       operands[3] = tmp;
3775       rel = gen_rtx_EQ (GET_MODE (rel), XEXP (rel, 0), XEXP (rel, 1));
3776     }
3777 
3778   emit_move_insn (operands[0],
3779 		  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3780 					rel,
3781 					operands[2],
3782 					operands[3]));
3783   return 0;
3784 }
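
/* Illustrative only: a conditional assignment such as

       x = (a != b) ? 4 : 7;

   is handled above by swapping the two constant arms and rewriting the
   test as EQ, so the emitted IF_THEN_ELSE reads as "(a == b) ? 7 : 4".
   Anything other than EQ/NE with constant arms returns 1, presumably
   so the expander can FAIL and the generic branching sequence is used
   instead.  */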
3785 
3786 /* Used for the "insv" pattern.  Return nonzero on failure, zero when done.  */
3787 int
3788 m32c_expand_insv (rtx *operands)
3789 {
3790   rtx op0, src0, p;
3791   int mask;
3792 
3793   if (INTVAL (operands[1]) != 1)
3794     return 1;
3795 
3796   /* Our insv opcode (bset, bclr) can only insert a one-bit constant.  */
3797   if (GET_CODE (operands[3]) != CONST_INT)
3798     return 1;
3799   if (INTVAL (operands[3]) != 0
3800       && INTVAL (operands[3]) != 1
3801       && INTVAL (operands[3]) != -1)
3802     return 1;
3803 
3804   mask = 1 << INTVAL (operands[2]);
3805 
3806   op0 = operands[0];
3807   if (GET_CODE (op0) == SUBREG
3808       && SUBREG_BYTE (op0) == 0)
3809     {
3810       rtx sub = SUBREG_REG (op0);
3811       if (GET_MODE (sub) == HImode || GET_MODE (sub) == QImode)
3812 	op0 = sub;
3813     }
3814 
3815   if (!can_create_pseudo_p ()
3816       || (GET_CODE (op0) == MEM && MEM_VOLATILE_P (op0)))
3817     src0 = op0;
3818   else
3819     {
3820       src0 = gen_reg_rtx (GET_MODE (op0));
3821       emit_move_insn (src0, op0);
3822     }
3823 
3824   if (GET_MODE (op0) == HImode
3825       && INTVAL (operands[2]) >= 8
3826       && GET_MODE (op0) == MEM)
3827     {
3828       /* We are little endian.  */
3829       rtx new_mem = gen_rtx_MEM (QImode, plus_constant (XEXP (op0, 0), 1));
3830       MEM_COPY_ATTRIBUTES (new_mem, op0);
3831       mask >>= 8;
3832     }
3833 
3834   /* First, we generate a mask with the correct polarity.  If we are
3835      storing a zero, we want an AND mask, so invert it.  */
3836   if (INTVAL (operands[3]) == 0)
3837     {
3838       /* Storing a zero, use an AND mask */
3839       if (GET_MODE (op0) == HImode)
3840 	mask ^= 0xffff;
3841       else
3842 	mask ^= 0xff;
3843     }
3844   /* Now we need to properly sign-extend the mask in case we need to
3845      fall back to an AND or OR opcode.  */
3846   if (GET_MODE (op0) == HImode)
3847     {
3848       if (mask & 0x8000)
3849 	mask -= 0x10000;
3850     }
3851   else
3852     {
3853       if (mask & 0x80)
3854 	mask -= 0x100;
3855     }
3856 
3857   switch (  (INTVAL (operands[3]) ? 4 : 0)
3858 	  + ((GET_MODE (op0) == HImode) ? 2 : 0)
3859 	  + (TARGET_A24 ? 1 : 0))
3860     {
3861     case 0: p = gen_andqi3_16 (op0, src0, GEN_INT (mask)); break;
3862     case 1: p = gen_andqi3_24 (op0, src0, GEN_INT (mask)); break;
3863     case 2: p = gen_andhi3_16 (op0, src0, GEN_INT (mask)); break;
3864     case 3: p = gen_andhi3_24 (op0, src0, GEN_INT (mask)); break;
3865     case 4: p = gen_iorqi3_16 (op0, src0, GEN_INT (mask)); break;
3866     case 5: p = gen_iorqi3_24 (op0, src0, GEN_INT (mask)); break;
3867     case 6: p = gen_iorhi3_16 (op0, src0, GEN_INT (mask)); break;
3868     case 7: p = gen_iorhi3_24 (op0, src0, GEN_INT (mask)); break;
3869     default: p = NULL_RTX; break; /* Not reached, but silences a warning.  */
3870     }
3871 
3872   emit_insn (p);
3873   return 0;
3874 }
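
/* Illustrative only: a one-bit bit-field store such as

       struct { unsigned b0:1, b1:1, b2:1, b3:1; } s;
       s.b3 = 1;

   can reach this expander with operands[2] == 3 and operands[3] == 1;
   the switch above then emits an IOR with mask 0x08 (a bset-style
   insn), whereas storing 0 inverts the mask to 0xf7 and emits an AND
   (bclr-style).  */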
3875 
3876 const char *
3877 m32c_scc_pattern(rtx *operands, RTX_CODE code)
3878 {
3879   static char buf[30];
3880   if (GET_CODE (operands[0]) == REG
3881       && REGNO (operands[0]) == R0_REGNO)
3882     {
3883       if (code == EQ)
3884 	return "stzx\t#1,#0,r0l";
3885       if (code == NE)
3886 	return "stzx\t#0,#1,r0l";
3887     }
3888   sprintf(buf, "bm%s\t0,%%h0\n\tand.b\t#1,%%0", GET_RTX_NAME (code));
3889   return buf;
3890 }
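
/* Illustrative only: for an EQ result stored directly in r0 the
   function above returns "stzx\t#1,#0,r0l"; for other destinations or
   conditions, e.g. LT, it builds "bmlt\t0,%h0" followed by
   "and.b\t#1,%0", which presumably stores the condition into bit 0 and
   masks the result down to 0 or 1.  */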
3891 
3892 /* Encode symbol attributes of a SYMBOL_REF into its
3893    SYMBOL_REF_FLAGS. */
3894 static void
3895 m32c_encode_section_info (tree decl, rtx rtl, int first)
3896 {
3897   int extra_flags = 0;
3898 
3899   default_encode_section_info (decl, rtl, first);
3900   if (TREE_CODE (decl) == FUNCTION_DECL
3901       && m32c_special_page_vector_p (decl))
3902 
3903     extra_flags = SYMBOL_FLAG_FUNCVEC_FUNCTION;
3904 
3905   if (extra_flags)
3906     SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= extra_flags;
3907 }
3908 
3909 /* Returns TRUE if the current function is a leaf, and thus we can
3910    determine which registers an interrupt function really needs to
3911    save.  The logic below is mostly about finding the insn sequence
3912    that's the function, versus any sequence that might be open for the
3913    current insn.  */
3914 static int
3915 m32c_leaf_function_p (void)
3916 {
3917   rtx saved_first, saved_last;
3918   struct sequence_stack *seq;
3919   int rv;
3920 
3921   saved_first = crtl->emit.x_first_insn;
3922   saved_last = crtl->emit.x_last_insn;
3923   for (seq = crtl->emit.sequence_stack; seq && seq->next; seq = seq->next)
3924     ;
3925   if (seq)
3926     {
3927       crtl->emit.x_first_insn = seq->first;
3928       crtl->emit.x_last_insn = seq->last;
3929     }
3930 
3931   rv = leaf_function_p ();
3932 
3933   crtl->emit.x_first_insn = saved_first;
3934   crtl->emit.x_last_insn = saved_last;
3935   return rv;
3936 }
3937 
3938 /* Returns TRUE if the current function needs to use the ENTER/EXIT
3939    opcodes.  If the function doesn't need the frame base or stack
3940    pointer, it can use the simpler RTS opcode.  */
3941 static bool
3942 m32c_function_needs_enter (void)
3943 {
3944   rtx insn;
3945   struct sequence_stack *seq;
3946   rtx sp = gen_rtx_REG (Pmode, SP_REGNO);
3947   rtx fb = gen_rtx_REG (Pmode, FB_REGNO);
3948 
3949   insn = get_insns ();
3950   for (seq = crtl->emit.sequence_stack;
3951        seq;
3952        insn = seq->first, seq = seq->next);
3953 
3954   while (insn)
3955     {
3956       if (reg_mentioned_p (sp, insn))
3957 	return true;
3958       if (reg_mentioned_p (fb, insn))
3959 	return true;
3960       insn = NEXT_INSN (insn);
3961     }
3962   return false;
3963 }
3964 
3965 /* Mark all the subexpressions of the PARALLEL rtx PAR as
3966    frame-related.  Return PAR.
3967 
3968    dwarf2out.c:dwarf2out_frame_debug_expr ignores sub-expressions of a
3969    PARALLEL rtx other than the first if they do not have the
3970    FRAME_RELATED flag set on them.  So this function is handy for
3971    marking up 'enter' instructions.  */
3972 static rtx
3973 m32c_all_frame_related (rtx par)
3974 {
3975   int len = XVECLEN (par, 0);
3976   int i;
3977 
3978   for (i = 0; i < len; i++)
3979     F (XVECEXP (par, 0, i));
3980 
3981   return par;
3982 }
3983 
3984 /* Emits the prologue.  See the frame layout comment earlier in this
3985    file.  We can reserve up to 256 bytes with the ENTER opcode; beyond
3986    that we update sp manually.  */
3987 void
3988 m32c_emit_prologue (void)
3989 {
3990   int frame_size, extra_frame_size = 0, reg_save_size;
3991   int complex_prologue = 0;
3992 
3993   cfun->machine->is_leaf = m32c_leaf_function_p ();
3994   if (interrupt_p (cfun->decl))
3995     {
3996       cfun->machine->is_interrupt = 1;
3997       complex_prologue = 1;
3998     }
3999   else if (bank_switch_p (cfun->decl))
4000     warning (OPT_Wattributes,
4001 	     "%<bank_switch%> has no effect on non-interrupt functions");
4002 
4003   reg_save_size = m32c_pushm_popm (PP_justcount);
4004 
4005   if (interrupt_p (cfun->decl))
4006     {
4007       if (bank_switch_p (cfun->decl))
4008 	emit_insn (gen_fset_b ());
4009       else if (cfun->machine->intr_pushm)
4010 	emit_insn (gen_pushm (GEN_INT (cfun->machine->intr_pushm)));
4011     }
4012 
4013   frame_size =
4014     m32c_initial_elimination_offset (FB_REGNO, SP_REGNO) - reg_save_size;
4015   if (frame_size == 0
4016       && !m32c_function_needs_enter ())
4017     cfun->machine->use_rts = 1;
4018 
4019   if (frame_size > 254)
4020     {
4021       extra_frame_size = frame_size - 254;
4022       frame_size = 254;
4023     }
4024   if (cfun->machine->use_rts == 0)
4025     F (emit_insn (m32c_all_frame_related
4026 		  (TARGET_A16
4027 		   ? gen_prologue_enter_16 (GEN_INT (frame_size + 2))
4028 		   : gen_prologue_enter_24 (GEN_INT (frame_size + 4)))));
4029 
4030   if (extra_frame_size)
4031     {
4032       complex_prologue = 1;
4033       if (TARGET_A16)
4034 	F (emit_insn (gen_addhi3 (gen_rtx_REG (HImode, SP_REGNO),
4035 				  gen_rtx_REG (HImode, SP_REGNO),
4036 				  GEN_INT (-extra_frame_size))));
4037       else
4038 	F (emit_insn (gen_addpsi3 (gen_rtx_REG (PSImode, SP_REGNO),
4039 				   gen_rtx_REG (PSImode, SP_REGNO),
4040 				   GEN_INT (-extra_frame_size))));
4041     }
4042 
4043   complex_prologue += m32c_pushm_popm (PP_pushm);
4044 
4045   /* This just emits a comment into the .s file for debugging.  */
4046   if (complex_prologue)
4047     emit_insn (gen_prologue_end ());
4048 }
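
/* Illustrative only: if the computed frame_size were 300, the code
   above would cap the ENTER reservation at 254 (extra_frame_size
   becomes 46), emit ENTER with an operand of 254 + 2 on A16 (or
   254 + 4 on A24), and then emit a separate add of -46 to sp for the
   remainder; the whole ENTER parallel is marked frame-related for the
   DWARF unwinder.  */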
4049 
4050 /* Likewise, for the epilogue.  The only exception is that, for
4051    interrupts, we must manually unwind the frame as the REIT opcode
4052    doesn't do that.  */
4053 void
4054 m32c_emit_epilogue (void)
4055 {
4056   /* This just emits a comment into the .s file for debugging.  */
4057   if (m32c_pushm_popm (PP_justcount) > 0 || cfun->machine->is_interrupt)
4058     emit_insn (gen_epilogue_start ());
4059 
4060   m32c_pushm_popm (PP_popm);
4061 
4062   if (cfun->machine->is_interrupt)
4063     {
4064       enum machine_mode spmode = TARGET_A16 ? HImode : PSImode;
4065 
4066       /* REIT clears B flag and restores $fp for us, but we still
4067 	 have to fix up the stack.  USE_RTS just means we didn't
4068 	 emit ENTER.  */
4069       if (!cfun->machine->use_rts)
4070 	{
4071 	  emit_move_insn (gen_rtx_REG (spmode, A0_REGNO),
4072 			  gen_rtx_REG (spmode, FP_REGNO));
4073 	  emit_move_insn (gen_rtx_REG (spmode, SP_REGNO),
4074 			  gen_rtx_REG (spmode, A0_REGNO));
4075 	  /* We can't just add this to the POPM because it would be in
4076 	     the wrong order, and wouldn't fix the stack if we're bank
4077 	     switching.  */
4078 	  if (TARGET_A16)
4079 	    emit_insn (gen_pophi_16 (gen_rtx_REG (HImode, FP_REGNO)));
4080 	  else
4081 	    emit_insn (gen_poppsi (gen_rtx_REG (PSImode, FP_REGNO)));
4082 	}
4083       if (!bank_switch_p (cfun->decl) && cfun->machine->intr_pushm)
4084 	emit_insn (gen_popm (GEN_INT (cfun->machine->intr_pushm)));
4085 
4086       /* The FREIT (Fast REturn from InTerrupt) instruction should be
4087          generated only for M32C/M32CM targets (generate the REIT
4088          instruction otherwise).  */
4089       if (fast_interrupt_p (cfun->decl))
4090         {
4091           /* Emit FREIT only when the target is M32C or M32CM (TARGET_A24).  */
4092           if (TARGET_A24)
4093             {
4094               emit_jump_insn (gen_epilogue_freit ());
4095             }
4096           /* If the fast_interrupt attribute is set for an R8C or M16C
4097              target, ignore the attribute and generate a REIT
4098              instruction instead.  */
4099           else
4100 	    {
4101 	      warning (OPT_Wattributes,
4102 		       "%<fast_interrupt%> attribute directive ignored");
4103 	      emit_jump_insn (gen_epilogue_reit_16 ());
4104 	    }
4105         }
4106       else if (TARGET_A16)
4107 	emit_jump_insn (gen_epilogue_reit_16 ());
4108       else
4109 	emit_jump_insn (gen_epilogue_reit_24 ());
4110     }
4111   else if (cfun->machine->use_rts)
4112     emit_jump_insn (gen_epilogue_rts ());
4113   else if (TARGET_A16)
4114     emit_jump_insn (gen_epilogue_exitd_16 ());
4115   else
4116     emit_jump_insn (gen_epilogue_exitd_24 ());
4117   emit_barrier ();
4118 }
4119 
4120 void
4121 m32c_emit_eh_epilogue (rtx ret_addr)
4122 {
4123   /* R0[R2] has the stack adjustment.  R1[R3] has the address to
4124      return to.  We have to fudge the stack, pop everything, pop SP
4125      (fudged), and return (fudged).  This is actually easier to do in
4126      assembler, so punt to libgcc.  */
4127   emit_jump_insn (gen_eh_epilogue (ret_addr, cfun->machine->eh_stack_adjust));
4128   /*  emit_clobber (gen_rtx_REG (HImode, R0L_REGNO)); */
4129   emit_barrier ();
4130 }
4131 
4132 /* Indicate which flags must be properly set for a given conditional.  */
4133 static int
4134 flags_needed_for_conditional (rtx cond)
4135 {
4136   switch (GET_CODE (cond))
4137     {
4138     case LE:
4139     case GT:
4140       return FLAGS_OSZ;
4141     case LEU:
4142     case GTU:
4143       return FLAGS_ZC;
4144     case LT:
4145     case GE:
4146       return FLAGS_OS;
4147     case LTU:
4148     case GEU:
4149       return FLAGS_C;
4150     case EQ:
4151     case NE:
4152       return FLAGS_Z;
4153     default:
4154       return FLAGS_N;
4155     }
4156 }
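
/* For example, an unsigned test such as (gtu:HI a b) needs both the Z
   and C flags (FLAGS_ZC), whereas (eq:HI a b) needs only Z, which is
   why only EQ/NE compares ever survive the FLAGS_OC check in
   m32c_compare_redundant below.  */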
4157 
4158 #define DEBUG_CMP 0
4159 
4160 /* Returns true if a compare insn is redundant because it would only
4161    set flags that are already set correctly.  */
4162 static bool
4163 m32c_compare_redundant (rtx cmp, rtx *operands)
4164 {
4165   int flags_needed;
4166   int pflags;
4167   rtx prev, pp, next;
4168   rtx op0, op1, op2;
4169 #if DEBUG_CMP
4170   int prev_icode, i;
4171 #endif
4172 
4173   op0 = operands[0];
4174   op1 = operands[1];
4175   op2 = operands[2];
4176 
4177 #if DEBUG_CMP
4178   fprintf(stderr, "\n\033[32mm32c_compare_redundant\033[0m\n");
4179   debug_rtx(cmp);
4180   for (i=0; i<2; i++)
4181     {
4182       fprintf(stderr, "operands[%d] = ", i);
4183       debug_rtx(operands[i]);
4184     }
4185 #endif
4186 
4187   next = next_nonnote_insn (cmp);
4188   if (!next || !INSN_P (next))
4189     {
4190 #if DEBUG_CMP
4191       fprintf(stderr, "compare not followed by insn\n");
4192       debug_rtx(next);
4193 #endif
4194       return false;
4195     }
4196   if (GET_CODE (PATTERN (next)) == SET
4197       && GET_CODE (XEXP ( PATTERN (next), 1)) == IF_THEN_ELSE)
4198     {
4199       next = XEXP (XEXP (PATTERN (next), 1), 0);
4200     }
4201   else if (GET_CODE (PATTERN (next)) == SET)
4202     {
4203       /* If this is a conditional, flags_needed will be something
4204 	 other than FLAGS_N, which we test below.  */
4205       next = XEXP (PATTERN (next), 1);
4206     }
4207   else
4208     {
4209 #if DEBUG_CMP
4210       fprintf(stderr, "compare not followed by conditional\n");
4211       debug_rtx(next);
4212 #endif
4213       return false;
4214     }
4215 #if DEBUG_CMP
4216   fprintf(stderr, "conditional is: ");
4217   debug_rtx(next);
4218 #endif
4219 
4220   flags_needed = flags_needed_for_conditional (next);
4221   if (flags_needed == FLAGS_N)
4222     {
4223 #if DEBUG_CMP
4224       fprintf(stderr, "compare not followed by conditional\n");
4225       debug_rtx(next);
4226 #endif
4227       return false;
4228     }
4229 
4230   /* Compare doesn't set overflow and carry the same way that
4231      arithmetic instructions do, so we can't replace those.  */
4232   if (flags_needed & FLAGS_OC)
4233     return false;
4234 
4235   prev = cmp;
4236   do {
4237     prev = prev_nonnote_insn (prev);
4238     if (!prev)
4239       {
4240 #if DEBUG_CMP
4241 	fprintf(stderr, "No previous insn.\n");
4242 #endif
4243 	return false;
4244       }
4245     if (!INSN_P (prev))
4246       {
4247 #if DEBUG_CMP
4248 	fprintf(stderr, "Previous insn is a non-insn.\n");
4249 #endif
4250 	return false;
4251       }
4252     pp = PATTERN (prev);
4253     if (GET_CODE (pp) != SET)
4254       {
4255 #if DEBUG_CMP
4256 	fprintf(stderr, "Previous insn is not a SET.\n");
4257 #endif
4258 	return false;
4259       }
4260     pflags = get_attr_flags (prev);
4261 
4262     /* Looking up attributes of previous insns corrupted the recog
4263        tables, so force CMP to be re-recognized here.  */
4264     INSN_UID (cmp) = -1;
4265     recog (PATTERN (cmp), cmp, 0);
4266 
4267     if (pflags == FLAGS_N
4268 	&& reg_mentioned_p (op0, pp))
4269       {
4270 #if DEBUG_CMP
4271 	fprintf(stderr, "intermediate non-flags insn uses op:\n");
4272 	debug_rtx(prev);
4273 #endif
4274 	return false;
4275       }
4276 
4277     /* Check for comparisons against memory - with volatiles and
4278        aliasing in play, we just can't risk eliding this one.  */
4279     if (GET_CODE (operands[0]) == MEM
4280 	|| GET_CODE (operands[1]) == MEM)
4281       {
4282 #if DEBUG_CMP
4283 	fprintf(stderr, "comparisons with memory:\n");
4284 	debug_rtx(prev);
4285 #endif
4286 	return false;
4287       }
4288 
4289     /* Check for PREV changing a register that's used to compute a
4290        value in CMP, even if it doesn't otherwise change flags.  */
4291     if (GET_CODE (operands[0]) == REG
4292 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[0]))
4293       {
4294 #if DEBUG_CMP
4295 	fprintf(stderr, "sub-value affected, op0:\n");
4296 	debug_rtx(prev);
4297 #endif
4298 	return false;
4299       }
4300     if (GET_CODE (operands[1]) == REG
4301 	&& rtx_referenced_p (SET_DEST (PATTERN (prev)), operands[1]))
4302       {
4303 #if DEBUG_CMP
4304 	fprintf(stderr, "sub-value affected, op1:\n");
4305 	debug_rtx(prev);
4306 #endif
4307 	return false;
4308       }
4309 
4310   } while (pflags == FLAGS_N);
4311 #if DEBUG_CMP
4312   fprintf(stderr, "previous flag-setting insn:\n");
4313   debug_rtx(prev);
4314   debug_rtx(pp);
4315 #endif
4316 
4317   if (GET_CODE (pp) == SET
4318       && GET_CODE (XEXP (pp, 0)) == REG
4319       && REGNO (XEXP (pp, 0)) == FLG_REGNO
4320       && GET_CODE (XEXP (pp, 1)) == COMPARE)
4321     {
4322       /* Adjacent cbranches must have the same operands to be
4323 	 redundant.  */
4324       rtx pop0 = XEXP (XEXP (pp, 1), 0);
4325       rtx pop1 = XEXP (XEXP (pp, 1), 1);
4326 #if DEBUG_CMP
4327       fprintf(stderr, "adjacent cbranches\n");
4328       debug_rtx(pop0);
4329       debug_rtx(pop1);
4330 #endif
4331       if (rtx_equal_p (op0, pop0)
4332 	  && rtx_equal_p (op1, pop1))
4333 	return true;
4334 #if DEBUG_CMP
4335       fprintf(stderr, "prev cmp not same\n");
4336 #endif
4337       return false;
4338     }
4339 
4340   /* Else the previous insn must be a SET, with either the source or
4341      dest equal to operands[0], and operands[1] must be zero.  */
4342 
4343   if (!rtx_equal_p (op1, const0_rtx))
4344     {
4345 #if DEBUG_CMP
4346       fprintf(stderr, "operands[1] not const0_rtx\n");
4347 #endif
4348       return false;
4349     }
4350   if (GET_CODE (pp) != SET)
4351     {
4352 #if DEBUG_CMP
4353       fprintf (stderr, "pp not set\n");
4354 #endif
4355       return false;
4356     }
4357   if (!rtx_equal_p (op0, SET_SRC (pp))
4358       && !rtx_equal_p (op0, SET_DEST (pp)))
4359     {
4360 #if DEBUG_CMP
4361       fprintf(stderr, "operands[0] not found in set\n");
4362 #endif
4363       return false;
4364     }
4365 
4366 #if DEBUG_CMP
4367   fprintf(stderr, "cmp flags %x prev flags %x\n", flags_needed, pflags);
4368 #endif
4369   if ((pflags & flags_needed) == flags_needed)
4370     return true;
4371 
4372   return false;
4373 }
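
/* Illustrative only: in a sequence like

       a = b - c;
       if (a == 0) ...

   the equality test needs only FLAGS_Z, and the walk above finds the
   subtraction as the previous flag-setting SET of operand 0 with
   operand 1 being const0_rtx, so the explicit compare can be dropped,
   assuming the subtraction's flags attribute records that it sets Z.
   Ordered comparisons are never elided because they need O or C,
   which arithmetic insns set differently than compare does.  */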
4374 
4375 /* Return the pattern for a compare.  This will be commented out if
4376    the compare is redundant, else a normal pattern is returned.  Thus,
4377    the assembler output says where the compare would have been.  */
4378 char *
4379 m32c_output_compare (rtx insn, rtx *operands)
4380 {
4381   static char templ[] = ";cmp.b\t%1,%0";
4382   /*                             ^ 5  */
4383 
4384   templ[5] = " bwll"[GET_MODE_SIZE(GET_MODE(operands[0]))];
4385   if (m32c_compare_redundant (insn, operands))
4386     {
4387 #if DEBUG_CMP
4388       fprintf(stderr, "cbranch: cmp not needed\n");
4389 #endif
4390       return templ;
4391     }
4392 
4393 #if DEBUG_CMP
4394   fprintf(stderr, "cbranch: cmp needed: `%s'\n", templ + 1);
4395 #endif
4396   return templ + 1;
4397 }
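
/* Illustrative only: for HImode operands the size letter becomes 'w',
   so a needed compare is printed as "cmp.w\t%1,%0" while a redundant
   one keeps the leading ';' and shows up in the .s file only as the
   comment ";cmp.w\t%1,%0".  */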
4398 
4399 #undef TARGET_ENCODE_SECTION_INFO
4400 #define TARGET_ENCODE_SECTION_INFO m32c_encode_section_info
4401 
4402 /* If the frame pointer isn't used, we detect it manually.  But the
4403    stack pointer doesn't have as flexible addressing as the frame
4404    pointer, so we always assume we have it.  */
4405 
4406 #undef TARGET_FRAME_POINTER_REQUIRED
4407 #define TARGET_FRAME_POINTER_REQUIRED hook_bool_void_true
4408 
4409 /* The Global `targetm' Variable. */
4410 
4411 struct gcc_target targetm = TARGET_INITIALIZER;
4412 
4413 #include "gt-m32c.h"
4414